Add regression test for manually vendored dependencies

Ivan Petkov 2022-01-30 14:39:06 -08:00
parent 47fbae6dc3
commit 715c8731c3
15 changed files with 6295 additions and 0 deletions

View File

@@ -115,6 +115,11 @@ onlyDrvs (lib.makeScope myLib.newScope (self:
src = ./overlapping-targets;
});
smokeManuallyVendored = self.smoke [ "manually-vendored" ] (myLib.buildPackage {
src = ./manually-vendored;
cargoVendorDir = ./manually-vendored/vendor;
});
smokeWorkspace = self.smoke [ "print" ] self.workspace;
smokeWorkspaceRoot = self.smoke [ "print" ] self.workspaceRoot;
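The hunk above wires the new fixture into the smoke checks: `buildPackage` is pointed at `./manually-vendored` and told, via `cargoVendorDir`, to consume the committed `./manually-vendored/vendor` directory instead of vendoring dependencies from the lockfile. As a rough sketch of the scenario being regression-tested, a consumer of this library might invoke it like so (the `myLib` binding and the `./vendor` path are illustrative; only `buildPackage` and `cargoVendorDir` are taken from the diff):

```nix
# Minimal sketch, assuming `myLib` is this project's lib as imported by the caller.
myLib.buildPackage {
  src = ./.;

  # Use a manually vendored directory of crate sources rather than letting the
  # build vendor them from Cargo.lock. Each crate in the directory is expected
  # to carry the .cargo-checksum.json that cargo requires for vendored registry
  # sources, as the byteorder copy added below does.
  cargoVendorDir = ./vendor;
}
```

The fixture added in the rest of this commit mirrors that layout: `checks/manually-vendored/` holds a tiny crate depending on `byteorder`, with `byteorder` 1.4.3 checked in under its vendor directory, checksum manifest included.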

checks/manually-vendored/Cargo.lock generated Normal file
View File

@@ -0,0 +1,16 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "byteorder"
version = "1.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
[[package]]
name = "manually-vendored"
version = "0.1.0"
dependencies = [
"byteorder",
]

View File

@@ -0,0 +1,7 @@
[package]
name = "manually-vendored"
version = "0.1.0"
edition = "2021"
[dependencies]
byteorder = "*"

View File

@@ -0,0 +1,3 @@
fn main() {
println!("LittleEndian: {:?}", std::any::TypeId::of::<byteorder::LittleEndian>());
}

View File

@@ -0,0 +1 @@
{"files":{"CHANGELOG.md":"3a745d94ee9dce0d9dc638c02078cd5001d3d9d12d58b4f220c0101e32cfc16a","COPYING":"01c266bced4a434da0051174d6bee16a4c82cf634e2679b6155d40d75012390f","Cargo.toml":"8585455e5a0e638cf5d489a21e286e93680f835cb8a13595918b5eb7c8c7f212","LICENSE-MIT":"0f96a83840e146e43c0ec96a22ec1f392e0680e6c1226e6f3ba87e0740af850f","README.md":"9d57556868344534de2489317e3c6bb611348ecd44438dcb982bd8d2a55a5a1b","UNLICENSE":"7e12e5df4bae12cb21581ba157ced20e1986a0508dd10d0e8a4ab9a4cf94e85c","benches/bench.rs":"a80bf3cd446c9b6c0cca3865c4de047bdf4644b74cdf696822f8ff87adfa1fca","rustfmt.toml":"1ca600239a27401c4a43f363cf3f38183a212affc1f31bff3ae93234bbaec228","src/io.rs":"9612530634d0e7ce9887a23836b58c0d972c1f45b05d9ada8355961567075627","src/lib.rs":"813ce6a8beafee3fd4e63325d783108aa02e8c57e412bc97580191d84082fbc9"},"package":"14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"}

View File

@@ -0,0 +1,139 @@
1.3.4
=====
This patch release squashes deprecation warnings for the `try!` macro, in
accordance with byteorder's minimum supported Rust version (currently at Rust
1.12.0).
1.3.3
=====
This patch release adds `ByteOrder::write_i8_into()` as a simple, safe interface
for ordinarily unsafe or tedious code.
1.3.2
=====
This patch release adds `ReadBytesExt::read_i8_into()` as a simple, safe interface
for ordinarily unsafe or tedious code.
1.3.1
=====
This minor release performs mostly small internal changes. Going forward, these
are not going to be incorporated into the changelog.
1.3.0
=====
This new minor release now enables `i128` support automatically on Rust
compilers that support 128-bit integers. The `i128` feature is now a no-op, but
continues to exist for backward compatibility purposes. The crate continues to
maintain compatibility with Rust 1.12.0.
This release also deprecates the `ByteOrder` trait methods
`read_f32_into_unchecked` and `read_f64_into_unchecked` in favor of
`read_f32_into` and `read_f64_into`. This was an oversight from the 1.2 release
where the corresponding methods on `ReadBytesExt` were deprecated.
`quickcheck` and `rand` were bumped to `0.8` and `0.6`, respectively.
A few small documentation related bugs have been fixed.
1.2.7
=====
This patch release excludes some CI files from the crate release and updates
the license field to use `OR` instead of `/`.
1.2.6
=====
This patch release fixes some test compilation errors introduced by an
over-eager release of 1.2.5.
1.2.5
=====
This patch release fixes some typos in the docs, adds doc tests to methods on
`WriteBytesExt` and bumps the quickcheck dependency to `0.7`.
1.2.4
=====
This patch release adds support for 48-bit integers by adding the following
methods to the `ByteOrder` trait: `read_u48`, `read_i48`, `write_u48` and
`write_i48`. Corresponding methods have been added to the `ReadBytesExt` and
`WriteBytesExt` traits as well.
1.2.3
=====
This patch release removes the use of `feature(i128_type)` from byteorder,
since it has been stabilized. We leave byteorder's `i128` feature in place
in order to continue supporting compilation on older versions of Rust.
1.2.2
=====
This patch release only consists of internal improvements and refactorings.
Notably, this removes all uses of `transmute` and instead uses pointer casts.
1.2.1
=====
This patch release removes more unnecessary uses of `unsafe` that
were overlooked in the prior `1.2.0` release. In particular, the
`ReadBytesExt::read_{f32,f64}_into_unchecked` methods have been deprecated and
replaced by more appropriately named `read_{f32,f64}_into` methods.
1.2.0
=====
The most prominent change in this release of `byteorder` is the removal of
unnecessary signaling NaN masking, and in turn, the `unsafe` annotations
associated with methods that didn't do masking. See
[#103](https://github.com/BurntSushi/byteorder/issues/103)
for more details.
* [BUG #102](https://github.com/BurntSushi/byteorder/issues/102):
Fix big endian tests.
* [BUG #103](https://github.com/BurntSushi/byteorder/issues/103):
Remove sNaN masking.
1.1.0
=====
This release of `byteorder` features a number of fixes and improvements, mostly
as a result of the
[Litz Blitz evaluation](https://public.etherpad-mozilla.org/p/rust-crate-eval-byteorder).
Feature enhancements:
* [FEATURE #63](https://github.com/BurntSushi/byteorder/issues/63):
Add methods for reading/writing slices of numbers for a specific
endianness.
* [FEATURE #65](https://github.com/BurntSushi/byteorder/issues/65):
Add support for `u128`/`i128` types. (Behind the nightly only `i128`
feature.)
* [FEATURE #72](https://github.com/BurntSushi/byteorder/issues/72):
Add "panics" and "errors" sections for each relevant public API item.
* [FEATURE #74](https://github.com/BurntSushi/byteorder/issues/74):
Add CI badges to Cargo.toml.
* [FEATURE #75](https://github.com/BurntSushi/byteorder/issues/75):
Add more examples to public API items.
* Add 24-bit read/write methods.
* Add `BE` and `LE` type aliases for `BigEndian` and `LittleEndian`,
respectively.
Bug fixes:
* [BUG #68](https://github.com/BurntSushi/byteorder/issues/68):
Panic in {BigEndian,LittleEndian}::default.
* [BUG #69](https://github.com/BurntSushi/byteorder/issues/69):
Seal the `ByteOrder` trait to prevent out-of-crate implementations.
* [BUG #71](https://github.com/BurntSushi/byteorder/issues/71):
Guarantee that the results of `read_f32`/`read_f64` are always defined.
* [BUG #73](https://github.com/BurntSushi/byteorder/issues/73):
Add crates.io categories.
* [BUG #77](https://github.com/BurntSushi/byteorder/issues/77):
Add `html_root` doc attribute.

View File

@@ -0,0 +1,3 @@
This project is dual-licensed under the Unlicense and MIT licenses.
You may use this code under the terms of either license.

View File

@@ -0,0 +1,43 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
edition = "2018"
name = "byteorder"
version = "1.4.3"
authors = ["Andrew Gallant <jamslam@gmail.com>"]
exclude = ["/ci/*"]
description = "Library for reading/writing numbers in big-endian and little-endian."
homepage = "https://github.com/BurntSushi/byteorder"
documentation = "https://docs.rs/byteorder"
readme = "README.md"
keywords = ["byte", "endian", "big-endian", "little-endian", "binary"]
categories = ["encoding", "parsing", "no-std"]
license = "Unlicense OR MIT"
repository = "https://github.com/BurntSushi/byteorder"
[profile.bench]
opt-level = 3
[lib]
name = "byteorder"
bench = false
[dev-dependencies.quickcheck]
version = "0.9.2"
default-features = false
[dev-dependencies.rand]
version = "0.7"
[features]
default = ["std"]
i128 = []
std = []

View File

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015 Andrew Gallant
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@@ -0,0 +1,63 @@
byteorder
=========
This crate provides convenience methods for encoding and decoding
numbers in either big-endian or little-endian order.
[![Build status](https://github.com/BurntSushi/byteorder/workflows/ci/badge.svg)](https://github.com/BurntSushi/byteorder/actions)
[![](https://meritbadge.herokuapp.com/byteorder)](https://crates.io/crates/byteorder)
Dual-licensed under MIT or the [UNLICENSE](https://unlicense.org/).
### Documentation
https://docs.rs/byteorder
### Installation
This crate works with Cargo and is on
[crates.io](https://crates.io/crates/byteorder). Add it to your `Cargo.toml`
like so:
```toml
[dependencies]
byteorder = "1"
```
If you want to augment existing `Read` and `Write` traits, then import the
extension methods like so:
```rust
use byteorder::{ReadBytesExt, WriteBytesExt, BigEndian, LittleEndian};
```
For example:
```rust
use std::io::Cursor;
use byteorder::{BigEndian, ReadBytesExt};
let mut rdr = Cursor::new(vec![2, 5, 3, 0]);
// Note that we use type parameters to indicate which kind of byte order
// we want!
assert_eq!(517, rdr.read_u16::<BigEndian>().unwrap());
assert_eq!(768, rdr.read_u16::<BigEndian>().unwrap());
```
### `no_std` crates
This crate has a feature, `std`, that is enabled by default. To use this crate
in a `no_std` context, add the following to your `Cargo.toml`:
```toml
[dependencies]
byteorder = { version = "1", default-features = false }
```
### Alternatives
Note that as of Rust 1.32, the standard numeric types provide built-in methods
like `to_le_bytes` and `from_le_bytes`, which support some of the same use
cases.

View File

@@ -0,0 +1,24 @@
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>

View File

@@ -0,0 +1,324 @@
#![feature(test)]
extern crate test;
macro_rules! bench_num {
($name:ident, $read:ident, $bytes:expr, $data:expr) => {
mod $name {
use byteorder::{
BigEndian, ByteOrder, LittleEndian, NativeEndian,
};
use test::black_box as bb;
use test::Bencher;
const NITER: usize = 100_000;
#[bench]
fn read_big_endian(b: &mut Bencher) {
let buf = $data;
b.iter(|| {
for _ in 0..NITER {
bb(BigEndian::$read(&buf, $bytes));
}
});
}
#[bench]
fn read_little_endian(b: &mut Bencher) {
let buf = $data;
b.iter(|| {
for _ in 0..NITER {
bb(LittleEndian::$read(&buf, $bytes));
}
});
}
#[bench]
fn read_native_endian(b: &mut Bencher) {
let buf = $data;
b.iter(|| {
for _ in 0..NITER {
bb(NativeEndian::$read(&buf, $bytes));
}
});
}
}
};
($ty:ident, $max:ident,
$read:ident, $write:ident, $size:expr, $data:expr) => {
mod $ty {
use byteorder::{
BigEndian, ByteOrder, LittleEndian, NativeEndian,
};
use std::$ty;
use test::black_box as bb;
use test::Bencher;
const NITER: usize = 100_000;
#[bench]
fn read_big_endian(b: &mut Bencher) {
let buf = $data;
b.iter(|| {
for _ in 0..NITER {
bb(BigEndian::$read(&buf));
}
});
}
#[bench]
fn read_little_endian(b: &mut Bencher) {
let buf = $data;
b.iter(|| {
for _ in 0..NITER {
bb(LittleEndian::$read(&buf));
}
});
}
#[bench]
fn read_native_endian(b: &mut Bencher) {
let buf = $data;
b.iter(|| {
for _ in 0..NITER {
bb(NativeEndian::$read(&buf));
}
});
}
#[bench]
fn write_big_endian(b: &mut Bencher) {
let mut buf = $data;
let n = $ty::$max;
b.iter(|| {
for _ in 0..NITER {
bb(BigEndian::$write(&mut buf, n));
}
});
}
#[bench]
fn write_little_endian(b: &mut Bencher) {
let mut buf = $data;
let n = $ty::$max;
b.iter(|| {
for _ in 0..NITER {
bb(LittleEndian::$write(&mut buf, n));
}
});
}
#[bench]
fn write_native_endian(b: &mut Bencher) {
let mut buf = $data;
let n = $ty::$max;
b.iter(|| {
for _ in 0..NITER {
bb(NativeEndian::$write(&mut buf, n));
}
});
}
}
};
}
bench_num!(u16, MAX, read_u16, write_u16, 2, [1, 2]);
bench_num!(i16, MAX, read_i16, write_i16, 2, [1, 2]);
bench_num!(u32, MAX, read_u32, write_u32, 4, [1, 2, 3, 4]);
bench_num!(i32, MAX, read_i32, write_i32, 4, [1, 2, 3, 4]);
bench_num!(u64, MAX, read_u64, write_u64, 8, [1, 2, 3, 4, 5, 6, 7, 8]);
bench_num!(i64, MAX, read_i64, write_i64, 8, [1, 2, 3, 4, 5, 6, 7, 8]);
bench_num!(f32, MAX, read_f32, write_f32, 4, [1, 2, 3, 4]);
bench_num!(f64, MAX, read_f64, write_f64, 8, [1, 2, 3, 4, 5, 6, 7, 8]);
bench_num!(uint_1, read_uint, 1, [1]);
bench_num!(uint_2, read_uint, 2, [1, 2]);
bench_num!(uint_3, read_uint, 3, [1, 2, 3]);
bench_num!(uint_4, read_uint, 4, [1, 2, 3, 4]);
bench_num!(uint_5, read_uint, 5, [1, 2, 3, 4, 5]);
bench_num!(uint_6, read_uint, 6, [1, 2, 3, 4, 5, 6]);
bench_num!(uint_7, read_uint, 7, [1, 2, 3, 4, 5, 6, 7]);
bench_num!(uint_8, read_uint, 8, [1, 2, 3, 4, 5, 6, 7, 8]);
bench_num!(int_1, read_int, 1, [1]);
bench_num!(int_2, read_int, 2, [1, 2]);
bench_num!(int_3, read_int, 3, [1, 2, 3]);
bench_num!(int_4, read_int, 4, [1, 2, 3, 4]);
bench_num!(int_5, read_int, 5, [1, 2, 3, 4, 5]);
bench_num!(int_6, read_int, 6, [1, 2, 3, 4, 5, 6]);
bench_num!(int_7, read_int, 7, [1, 2, 3, 4, 5, 6, 7]);
bench_num!(int_8, read_int, 8, [1, 2, 3, 4, 5, 6, 7, 8]);
bench_num!(
u128,
MAX,
read_u128,
write_u128,
16,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
);
bench_num!(
i128,
MAX,
read_i128,
write_i128,
16,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
);
bench_num!(uint128_1, read_uint128, 1, [1]);
bench_num!(uint128_2, read_uint128, 2, [1, 2]);
bench_num!(uint128_3, read_uint128, 3, [1, 2, 3]);
bench_num!(uint128_4, read_uint128, 4, [1, 2, 3, 4]);
bench_num!(uint128_5, read_uint128, 5, [1, 2, 3, 4, 5]);
bench_num!(uint128_6, read_uint128, 6, [1, 2, 3, 4, 5, 6]);
bench_num!(uint128_7, read_uint128, 7, [1, 2, 3, 4, 5, 6, 7]);
bench_num!(uint128_8, read_uint128, 8, [1, 2, 3, 4, 5, 6, 7, 8]);
bench_num!(uint128_9, read_uint128, 9, [1, 2, 3, 4, 5, 6, 7, 8, 9]);
bench_num!(uint128_10, read_uint128, 10, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
bench_num!(uint128_11, read_uint128, 11, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
bench_num!(
uint128_12,
read_uint128,
12,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
);
bench_num!(
uint128_13,
read_uint128,
13,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
);
bench_num!(
uint128_14,
read_uint128,
14,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
);
bench_num!(
uint128_15,
read_uint128,
15,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
);
bench_num!(
uint128_16,
read_uint128,
16,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
);
bench_num!(int128_1, read_int128, 1, [1]);
bench_num!(int128_2, read_int128, 2, [1, 2]);
bench_num!(int128_3, read_int128, 3, [1, 2, 3]);
bench_num!(int128_4, read_int128, 4, [1, 2, 3, 4]);
bench_num!(int128_5, read_int128, 5, [1, 2, 3, 4, 5]);
bench_num!(int128_6, read_int128, 6, [1, 2, 3, 4, 5, 6]);
bench_num!(int128_7, read_int128, 7, [1, 2, 3, 4, 5, 6, 7]);
bench_num!(int128_8, read_int128, 8, [1, 2, 3, 4, 5, 6, 7, 8]);
bench_num!(int128_9, read_int128, 9, [1, 2, 3, 4, 5, 6, 7, 8, 9]);
bench_num!(int128_10, read_int128, 10, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
bench_num!(int128_11, read_int128, 11, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
bench_num!(
int128_12,
read_int128,
12,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
);
bench_num!(
int128_13,
read_int128,
13,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
);
bench_num!(
int128_14,
read_int128,
14,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
);
bench_num!(
int128_15,
read_int128,
15,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
);
bench_num!(
int128_16,
read_int128,
16,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
);
macro_rules! bench_slice {
($name:ident, $numty:ty, $read:ident, $write:ident) => {
mod $name {
use std::mem::size_of;
use byteorder::{BigEndian, ByteOrder, LittleEndian};
use rand::distributions;
use rand::{self, Rng};
use test::Bencher;
#[bench]
fn read_big_endian(b: &mut Bencher) {
let mut numbers: Vec<$numty> = rand::thread_rng()
.sample_iter(&distributions::Standard)
.take(100000)
.collect();
let mut bytes = vec![0; numbers.len() * size_of::<$numty>()];
BigEndian::$write(&numbers, &mut bytes);
b.bytes = bytes.len() as u64;
b.iter(|| {
BigEndian::$read(&bytes, &mut numbers);
});
}
#[bench]
fn read_little_endian(b: &mut Bencher) {
let mut numbers: Vec<$numty> = rand::thread_rng()
.sample_iter(&distributions::Standard)
.take(100000)
.collect();
let mut bytes = vec![0; numbers.len() * size_of::<$numty>()];
LittleEndian::$write(&numbers, &mut bytes);
b.bytes = bytes.len() as u64;
b.iter(|| {
LittleEndian::$read(&bytes, &mut numbers);
});
}
#[bench]
fn write_big_endian(b: &mut Bencher) {
let numbers: Vec<$numty> = rand::thread_rng()
.sample_iter(&distributions::Standard)
.take(100000)
.collect();
let mut bytes = vec![0; numbers.len() * size_of::<$numty>()];
b.bytes = bytes.len() as u64;
b.iter(|| {
BigEndian::$write(&numbers, &mut bytes);
});
}
#[bench]
fn write_little_endian(b: &mut Bencher) {
let numbers: Vec<$numty> = rand::thread_rng()
.sample_iter(&distributions::Standard)
.take(100000)
.collect();
let mut bytes = vec![0; numbers.len() * size_of::<$numty>()];
b.bytes = bytes.len() as u64;
b.iter(|| {
LittleEndian::$write(&numbers, &mut bytes);
});
}
}
};
}
bench_slice!(slice_u64, u64, read_u64_into, write_u64_into);

View File

@@ -0,0 +1,2 @@
max_width = 79
use_small_heuristics = "max"

File diff suppressed because it is too large

File diff suppressed because it is too large