18 changes: 6 additions & 12 deletions .github/workflows/rust.yml
@@ -13,6 +13,7 @@ env:
jobs:
test:
strategy:
fail-fast: false
matrix:
os:
- ubuntu-latest
@@ -25,7 +26,7 @@ jobs:
- os: ubuntu-latest
rust: msrv
lint: 1
- rust: stable
- rust: msrv
rust-args: --all-features
runs-on: ${{ matrix.os }}
steps:
@@ -38,7 +39,7 @@ jobs:
ver="${{ matrix.rust }}"
if [ "$ver" = msrv ]; then
ver=$(cargo metadata --format-version 1 --no-deps | \
jq -r '.packages[0].rust_version')
jq -r 'first(.packages[] | select(.rust_version != null).rust_version)')
extra=(-c rustfmt -c clippy)
fi
rustup toolchain install "$ver" --profile minimal --no-self-update "${extra[@]}"
@@ -49,17 +50,10 @@ jobs:

- uses: Swatinem/rust-cache@v2

- name: Set nix to minimum version (0.24)
if: matrix.rust == 'msrv'
run: cargo update -p nix --precise 0.24.0
- run: cargo test --workspace ${{ matrix.rust-args }}

- name: cargo test
run: cargo test --workspace ${{ matrix.rust-args }}

- name: rustfmt
- run: cargo fmt --all -- --check
if: github.event_name == 'pull_request' && matrix.lint
run: cargo fmt --all -- --check

- name: clippy
- run: cargo clippy --all --tests --all-features -- -D warnings
if: github.event_name == 'pull_request' && matrix.lint
run: cargo clippy --all --tests --all-features -- -D warnings
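
The only non-mechanical change in this workflow is the jq filter used to derive the MSRV toolchain: the old `.packages[0].rust_version` silently returns `null` when the first package listed by `cargo metadata` does not declare a `rust-version`, while the new filter takes the first package that actually sets one. A rough Rust equivalent of the new filter is sketched below; it is not part of the PR and assumes a `serde_json` dependency.

```rust
// Sketch only: mirrors `first(.packages[] | select(.rust_version != null).rust_version)`.
// Not part of this PR; assumes serde_json is available.
use std::process::Command;

fn main() {
    let out = Command::new("cargo")
        .args(["metadata", "--format-version", "1", "--no-deps"])
        .output()
        .expect("failed to run cargo metadata");
    let meta: serde_json::Value =
        serde_json::from_slice(&out.stdout).expect("cargo metadata did not return valid JSON");

    // Take the first workspace package that actually declares `rust-version`.
    let msrv = meta["packages"]
        .as_array()
        .into_iter()
        .flatten()
        .filter_map(|pkg| pkg["rust_version"].as_str())
        .next();

    println!("MSRV: {}", msrv.unwrap_or("<none declared>"));
}
```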
8 changes: 4 additions & 4 deletions Cargo.toml
@@ -4,7 +4,7 @@ version = "1.1.4"
authors = ["Cecile Tonglet <[email protected]>"]
license = "MIT OR Apache-2.0"
edition = "2021"
rust-version = "1.65"
rust-version = "1.85"
description = "Pure Rust library to read and modify GUID partition tables"
repository = "https://github.com/rust-disk-partition-management/gptman"
homepage = "https://github.com/rust-disk-partition-management/gptman"
@@ -18,13 +18,13 @@ categories = ["filesystem"]
name = "gptman"

[dependencies]
bincode = "1.3.1"
bincode = { version = "2.0.1", features = ["serde"] }
serde = { version = "1.0.116", features = ["derive"] }
crc = "3.0.0"
thiserror = "1.0"
thiserror = "2.0.12"

[features]
default = [ "nix" ]

[target.'cfg(target_os = "linux")'.dependencies]
nix = { version = ">= 0.24, < 0.28", default-features = false, features = ["ioctl"], optional = true }
nix = { version = "0.30", default-features = false, features = ["ioctl"], optional = true }
91 changes: 47 additions & 44 deletions src/lib.rs
@@ -70,7 +70,9 @@

#![deny(missing_docs)]

use bincode::{deserialize_from, serialize, serialize_into};
use bincode::config::legacy;
use bincode::error::{DecodeError, EncodeError};
use bincode::serde::{decode_from_std_read, encode_into_std_write, encode_to_vec};
use crc::{Crc, CRC_32_ISO_HDLC};
use serde::de::{SeqAccess, Visitor};
use serde::ser::SerializeTuple;
@@ -96,7 +98,10 @@ const MAX_ALIGN: u64 = 16384;
pub enum Error {
/// Deserialization errors.
#[error("deserialization failed")]
Deserialize(#[from] bincode::Error),
Deserialize(#[from] DecodeError),
/// Serialization errors.
#[error("seserialization failed")]
Seserialize(#[from] EncodeError),
/// I/O errors.
#[error("generic I/O error")]
Io(#[from] io::Error),
@@ -230,11 +235,11 @@ impl GPTHeader {
///
/// The field `last_usable_lba` is not updated to reflect the actual size of the disk. You must
/// do this yourself by calling `update_from`.
pub fn read_from<R: ?Sized>(reader: &mut R) -> Result<GPTHeader>
pub fn read_from<R>(mut reader: &mut R) -> Result<GPTHeader>
where
R: Read + Seek,
R: Read + Seek + ?Sized,
{
let gpt: GPTHeader = deserialize_from(reader)?;
let gpt: GPTHeader = decode_from_std_read(&mut reader, legacy())?;

if &gpt.signature != b"EFI PART" {
return Err(Error::InvalidSignature);
@@ -258,27 +263,27 @@ impl GPTHeader {

/// Write the GPT header into a writer. This operation will update the CRC32 checksums of the
/// current struct and seek at the location `primary_lba` before trying to write to disk.
pub fn write_into<W: ?Sized>(
pub fn write_into<W>(
&mut self,
mut writer: &mut W,
sector_size: u64,
partitions: &[GPTPartitionEntry],
) -> Result<()>
where
W: Write + Seek,
W: Write + Seek + ?Sized,
{
self.update_partition_entry_array_crc32(partitions);
self.update_crc32_checksum();

writer.seek(SeekFrom::Start(self.primary_lba * sector_size))?;
serialize_into(&mut writer, &self)?;
encode_into_std_write(&self, &mut writer, legacy())?;

for i in 0..self.number_of_partition_entries {
writer.seek(SeekFrom::Start(
self.partition_entry_lba * sector_size
+ u64::from(i) * u64::from(self.size_of_partition_entry),
))?;
serialize_into(&mut writer, &partitions[i as usize])?;
encode_into_std_write(&partitions[i as usize], &mut writer, legacy())?;
}

Ok(())
@@ -288,7 +293,7 @@ impl GPTHeader {
pub fn generate_crc32_checksum(&self) -> u32 {
let mut clone = self.clone();
clone.crc32_checksum = 0;
let data = serialize(&clone).expect("could not serialize");
let data = encode_to_vec(&clone, legacy()).expect("could not serialize");
assert_eq!(data.len() as u32, clone.header_size);

Crc::<u32>::new(&CRC_32_ISO_HDLC).checksum(&data)
@@ -307,7 +312,7 @@ impl GPTHeader {
let mut digest = crc.digest();
let mut wrote = 0;
for x in partitions {
let data = serialize(&x).expect("could not serialize");
let data = encode_to_vec(x, legacy()).expect("could not serialize");
digest.update(&data);
wrote += data.len();
}
@@ -327,9 +332,9 @@ impl GPTHeader {
/// Updates the header to match the specifications of the seeker given in argument.
/// `first_usable_lba`, `last_usable_lba`, `primary_lba`, `backup_lba`, `partition_entry_lba`
/// will be updated after this operation.
pub fn update_from<S: ?Sized>(&mut self, seeker: &mut S, sector_size: u64) -> Result<()>
pub fn update_from<S>(&mut self, seeker: &mut S, sector_size: u64) -> Result<()>
where
S: Seek,
S: Seek + ?Sized,
{
let partition_array_size = (u64::from(self.number_of_partition_entries)
* u64::from(self.size_of_partition_entry)
@@ -527,11 +532,11 @@ impl GPTPartitionEntry {
}

/// Read a partition entry from the reader at the current position.
pub fn read_from<R: ?Sized>(reader: &mut R) -> bincode::Result<GPTPartitionEntry>
pub fn read_from<R>(mut reader: &mut R) -> std::result::Result<GPTPartitionEntry, DecodeError>
where
R: Read,
R: Read + ?Sized,
{
deserialize_from(reader)
decode_from_std_read(&mut reader, legacy())
}

/// Returns `true` if the partition entry is not used (type GUID == `[0; 16]`)
@@ -715,9 +720,9 @@ impl GPT {
/// let gpt = gptman::GPT::read_from(&mut f, 512)
/// .expect("could not read the partition table");
/// ```
pub fn read_from<R: ?Sized>(mut reader: &mut R, sector_size: u64) -> Result<GPT>
pub fn read_from<R>(mut reader: &mut R, sector_size: u64) -> Result<GPT>
where
R: Read + Seek,
R: Read + Seek + ?Sized,
{
use self::Error::*;

@@ -779,9 +784,9 @@ impl GPT {
/// let gpt_4096 = gptman::GPT::find_from(&mut f_4096)
/// .expect("could not read the partition table");
/// ```
pub fn find_from<R: ?Sized>(mut reader: &mut R) -> Result<GPT>
pub fn find_from<R>(mut reader: &mut R) -> Result<GPT>
where
R: Read + Seek,
R: Read + Seek + ?Sized,
{
use self::Error::*;

@@ -886,9 +891,9 @@ impl GPT {
/// gpt.write_into(&mut cur)
/// .expect("could not write GPT to disk");
/// ```
pub fn write_into<W: ?Sized>(&mut self, mut writer: &mut W) -> Result<GPTHeader>
pub fn write_into<W>(&mut self, mut writer: &mut W) -> Result<GPTHeader>
where
W: Write + Seek,
W: Write + Seek + ?Sized,
{
self.check_partition_guids()?;
self.check_partition_boundaries()?;
@@ -1233,9 +1238,9 @@ impl GPT {
/// starting at byte 446 and ending at byte 511. Any existing data will be overwritten.
///
/// See also: [`Self::write_bootable_protective_mbr_into`].
pub fn write_protective_mbr_into<W: ?Sized>(mut writer: &mut W, sector_size: u64) -> Result<()>
pub fn write_protective_mbr_into<W>(mut writer: &mut W, sector_size: u64) -> Result<()>
where
W: Write + Seek,
W: Write + Seek + ?Sized,
{
Self::write_protective_mbr_into_impl(&mut writer, sector_size, false)
}
@@ -1250,23 +1255,20 @@ impl GPT {
/// <div class="warning">Some systems will not consider a disk to be bootable in UEFI mode
/// if the pMBR is marked as bootable, so this should only be used if booting on legacy BIOS
/// systems is a requirement.</div>
pub fn write_bootable_protective_mbr_into<W: ?Sized>(
mut writer: &mut W,
sector_size: u64,
) -> Result<()>
pub fn write_bootable_protective_mbr_into<W>(mut writer: &mut W, sector_size: u64) -> Result<()>
where
W: Write + Seek,
W: Write + Seek + ?Sized,
{
Self::write_protective_mbr_into_impl(&mut writer, sector_size, true)
}

fn write_protective_mbr_into_impl<W: ?Sized>(
fn write_protective_mbr_into_impl<W>(
mut writer: &mut W,
sector_size: u64,
bootable: bool,
) -> Result<()>
where
W: Write + Seek,
W: Write + Seek + ?Sized,
{
let size = writer.seek(SeekFrom::End(0))? / sector_size - 1;
writer.seek(SeekFrom::Start(446))?;
@@ -1283,13 +1285,14 @@ impl GPT {
0x01, 0x00, 0x00, 0x00, // LBA of first absolute sector
])?;
// number of sectors in partition 1
serialize_into(
&mut writer,
&(if size > u64::from(u32::max_value()) {
u32::max_value()
encode_into_std_write(
if size > u64::from(u32::MAX) {
u32::MAX
} else {
size as u32
}),
},
&mut writer,
legacy(),
)?;
writer.write_all(&[0; 16])?; // partition 2
writer.write_all(&[0; 16])?; // partition 3
@@ -1382,7 +1385,7 @@ mod test {
}

// NOTE: testing that serializing the PartitionName (and the whole struct) works
let data1 = serialize(&partition).unwrap();
let data1 = encode_to_vec(&partition, legacy()).unwrap();
f.seek(SeekFrom::Start(
gpt.partition_entry_lba * ss
+ u64::from(i) * u64::from(gpt.size_of_partition_entry),
@@ -1432,7 +1435,7 @@ mod test {
assert_eq!(gpt.header.partition_entry_lba, 2);
gpt.header.crc32_checksum = 1;
cur.seek(SeekFrom::Start(gpt.sector_size)).unwrap();
serialize_into(&mut cur, &gpt.header).unwrap();
encode_into_std_write(&gpt.header, &mut cur, legacy()).unwrap();
let maybe_gpt = GPT::read_from(&mut cur, gpt.sector_size);
assert!(maybe_gpt.is_ok());
let gpt = maybe_gpt.unwrap();
@@ -1587,7 +1590,7 @@ mod test {

gpt.header.crc32_checksum = 1;
cur.seek(SeekFrom::Start(ss)).unwrap();
serialize_into(&mut cur, &gpt.header).unwrap();
encode_into_std_write(&gpt.header, &mut cur, legacy()).unwrap();
let maybe_gpt = GPT::read_from(&mut cur, ss);
assert!(maybe_gpt.is_ok());
let gpt = maybe_gpt.unwrap();
@@ -1608,7 +1611,7 @@ mod test {
gpt.header.crc32_checksum = 1;
let backup_lba = gpt.header.backup_lba;
cur.seek(SeekFrom::Start(ss)).unwrap();
serialize_into(&mut cur, &gpt.header).unwrap();
encode_into_std_write(&gpt.header, &mut cur, legacy()).unwrap();
let mut gpt = GPT::read_from(&mut cur, ss).unwrap();
assert!(!gpt.is_primary());
assert!(gpt.is_backup());
@@ -1628,7 +1631,7 @@ mod test {

gpt.header.crc32_checksum = 1;
cur.seek(SeekFrom::Start(ss)).unwrap();
serialize_into(&mut cur, &gpt.header).unwrap();
encode_into_std_write(&gpt.header, &mut cur, legacy()).unwrap();
let maybe_gpt = GPT::read_from(&mut cur, ss);
assert!(maybe_gpt.is_ok());
let gpt = maybe_gpt.unwrap();
@@ -1658,7 +1661,7 @@ mod test {

gpt.header.crc32_checksum = 1;
cur.seek(SeekFrom::Start(ss)).unwrap();
serialize_into(&mut cur, &gpt.header).unwrap();
encode_into_std_write(&gpt.header, &mut cur, legacy()).unwrap();
let maybe_gpt = GPT::read_from(&mut cur, ss);
assert!(maybe_gpt.is_ok());
let gpt = maybe_gpt.unwrap();
@@ -1862,8 +1865,8 @@ mod test {
}

cur.seek(SeekFrom::Start(446 + 8)).unwrap();
let first_lba: u32 = deserialize_from(&mut cur).unwrap();
let sectors: u32 = deserialize_from(&mut cur).unwrap();
let first_lba: u32 = decode_from_std_read(&mut cur, legacy()).unwrap();
let sectors: u32 = decode_from_std_read(&mut cur, legacy()).unwrap();
assert_eq!(first_lba, 1);
assert_eq!(sectors, 99);
}
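
For context on the public API these changes flow through, here is a hedged end-to-end sketch against an in-memory disk image; `new_from` and `iter` come from gptman's documented API rather than from this diff, and the disk size is an arbitrary choice.

```rust
// Usage sketch, not part of the diff. Exercises the paths rewired onto bincode 2:
// header/partition encoding in write_into and decoding in read_from.
use std::io::Cursor;

fn main() -> Result<(), gptman::Error> {
    const SS: u64 = 512;
    // Hypothetical blank 128 KiB "disk" held in memory.
    let mut disk = Cursor::new(vec![0u8; 128 * 1024]);

    // Build a fresh table (disk GUID is arbitrary here), then write pMBR + GPT.
    let mut gpt = gptman::GPT::new_from(&mut disk, SS, [0xff; 16])?;
    gptman::GPT::write_protective_mbr_into(&mut disk, SS)?;
    gpt.write_into(&mut disk)?;

    // Reading back goes through decode_from_std_read(.., legacy()) internally.
    let reread = gptman::GPT::read_from(&mut disk, SS)?;
    assert!(reread.iter().all(|(_, p)| p.is_unused()));
    Ok(())
}
```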