Skip to content

Commit

Permalink
Remove deprecated code
Browse files Browse the repository at this point in the history
  • Loading branch information
bluk committed Nov 20, 2023
1 parent 59b9f6f commit dcacf6a
Show file tree
Hide file tree
Showing 4 changed files with 0 additions and 220 deletions.
4 changes: 0 additions & 4 deletions maybe_xml/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -115,10 +115,6 @@ pub mod token;

pub use read::{IntoIter, Iter, Reader};

#[doc(hidden)]
#[deprecated(since = "0.9.0", note = "Use Reader type instead.")]
pub use read::Reader as Lexer;

enum QuoteState {
None,
Single,
Expand Down
120 changes: 0 additions & 120 deletions maybe_xml/src/read.rs
Original file line number Diff line number Diff line change
Expand Up @@ -89,97 +89,6 @@ pub struct Reader<'a> {
}

impl<'a> Reader<'a> {
/// Creates a new instance from a byte slice that is assumed to hold UTF-8.
///
/// # Safety
///
/// The caller must guarantee that `input` is a valid UTF-8 string. If the
/// bytes are not valid UTF-8, the behavior of every method called on the
/// returned value is undefined.
///
/// # Example
///
/// ```
/// use maybe_xml::{Reader, token::Ty};
///
/// let buf = b"<id>123</id>".to_vec();
///
/// // SAFETY: `buf` contains only ASCII, which is valid UTF-8.
/// let reader = unsafe { Reader::from_slice_unchecked(&buf) };
/// let mut pos = 0;
///
/// let token = reader.tokenize(&mut pos);
/// if let Some(Ty::StartTag(tag)) = token.map(|t| t.ty()) {
///     assert_eq!("id", tag.name().local().as_str());
/// } else {
///     panic!();
/// }
///
/// // `pos` now points just past the `<id>` start tag.
/// assert_eq!(4, pos);
/// ```
#[deprecated(
    since = "0.9.0",
    note = "Use core::str::from_utf8_unchecked() and then use Reader::from_str() instead."
)]
#[inline]
#[must_use]
pub const unsafe fn from_slice_unchecked(input: &'a [u8]) -> Self {
    // Reinterpret the bytes as a `str` without validation; the caller's
    // safety contract guarantees the bytes are valid UTF-8.
    let text = core::str::from_utf8_unchecked(input);
    Self { input: text }
}

/// Creates a new instance with the given UTF-8 string input.
#[inline]
#[must_use]
Expand Down Expand Up @@ -506,14 +415,6 @@ mod tests {

use super::*;

#[cfg(all(feature = "alloc", not(feature = "std")))]
extern crate alloc;

#[cfg(all(feature = "alloc", not(feature = "std")))]
use alloc::vec::Vec;
#[cfg(feature = "std")]
use std::vec::Vec;

#[test]
fn none_on_empty() {
let reader = Reader::from_str("");
Expand Down Expand Up @@ -548,27 +449,6 @@ mod tests {
let _ = reader.tokenize(&mut pos);
}

#[cfg(any(feature = "std", feature = "alloc"))]
#[test]
fn text_content() {
    // Feed the reader incrementally and check that each call to
    // `tokenize` resumes from the saved position. The final chunk ends
    // with an unfinished `<`, which must be left untokenized.
    let mut bytes = Vec::new();
    let mut index = 0;

    let steps = [("Hello", "Hello", 0), ("wo", "wo", 0), ("rld!<", "rld!", 1)];
    for &(chunk, expected, leftover) in steps.iter() {
        bytes.extend(chunk.as_bytes());
        let reader = unsafe { Reader::from_slice_unchecked(&bytes) };
        assert_eq!(
            Some(expected),
            reader.tokenize(&mut index).map(|t| t.as_str())
        );
        // All input consumed except any trailing incomplete token.
        assert_eq!(bytes.len() - leftover, index);
    }
}

fn verify_tokenize_all(input: &str, expected: &[Ty<'_>]) {
verify_tokenize(input, 0, expected, input.len());
}
Expand Down
64 changes: 0 additions & 64 deletions maybe_xml/src/token.rs
Original file line number Diff line number Diff line change
Expand Up @@ -51,38 +51,6 @@ impl<'a> Token<'a> {
self.0
}

/// The token represented as a str.
///
/// # Errors
///
/// If the bytes are not a UTF-8 string.
#[deprecated(since = "0.8.0", note = "Use as_str() instead.")]
#[inline]
pub fn to_str(&self) -> Result<&'a str, core::str::Utf8Error> {
// Always returns `Ok`: this simply wraps the infallible `as_str()`.
// The `Result` signature is retained only for backward compatibility
// with the pre-0.8.0 byte-based API (see the deprecation note).
Ok(self.as_str())
}

/// The token represented as a str.
///
/// # Safety
///
/// The underlying bytes are assumed to be UTF-8. If the bytes are
/// not valid UTF-8, then the behavior is undefined.
#[deprecated(since = "0.8.0", note = "Use as_str() instead.")]
#[inline]
#[must_use]
pub const unsafe fn as_str_unchecked(&self) -> &'a str {
// Forwards to the safe `as_str()`; the `unsafe` marker is kept only
// so the signature matches the pre-0.8.0 byte-based API (see the
// deprecation note).
self.as_str()
}

/// Returns the underlying slice.
#[deprecated(since = "0.8.0", note = "Use as_bytes() instead.")]
#[inline]
#[must_use]
pub const fn into_inner(self) -> &'a [u8] {
// Deprecated alias: identical to calling `as_bytes()` directly.
self.as_bytes()
}

/// Returns the token type.
#[inline]
#[must_use]
Expand Down Expand Up @@ -167,38 +135,6 @@ macro_rules! converters {
pub const fn len(&self) -> usize {
self.0.len()
}

/// The token represented as a str.
///
/// # Errors
///
/// If the bytes are not a UTF-8 string.
#[deprecated(since = "0.8.0", note = "Use as_str() instead.")]
#[inline]
pub fn to_str(&self) -> Result<&'a str, core::str::Utf8Error> {
// Always returns `Ok`: this simply wraps the infallible `as_str()`.
// The `Result` signature is retained only for backward compatibility
// with the pre-0.8.0 byte-based API (see the deprecation note).
Ok(self.as_str())
}

/// The token represented as a str.
///
/// # Safety
///
/// The underlying bytes are assumed to be UTF-8. If the bytes are
/// not valid UTF-8, then the behavior is undefined.
#[deprecated(since = "0.8.0", note = "Use as_str() instead.")]
#[inline]
#[must_use]
pub const unsafe fn as_str_unchecked(&self) -> &'a str {
// Forwards to the safe `as_str()`; the `unsafe` marker is kept only
// so the signature matches the pre-0.8.0 byte-based API (see the
// deprecation note).
self.as_str()
}

/// Returns the underlying slice.
#[deprecated(since = "0.8.0", note = "Use as_bytes() instead.")]
#[inline]
#[must_use]
pub const fn into_inner(self) -> &'a [u8] {
// Deprecated alias: identical to calling `as_bytes()` directly.
self.as_bytes()
}
}

impl<'a> fmt::Display for $name<'a> {
Expand Down
32 changes: 0 additions & 32 deletions maybe_xml/src/token/prop.rs
Original file line number Diff line number Diff line change
Expand Up @@ -31,38 +31,6 @@ macro_rules! converters {
pub const fn as_str(&self) -> &'a str {
// The property newtype wraps the source `str` slice directly, so this
// is a free borrow of field 0.
self.0
}

/// The token property represented as a str.
///
/// # Errors
///
/// If the bytes are not a UTF-8 string.
#[deprecated(since = "0.8.0", note = "Use as_str() instead.")]
#[inline]
pub fn to_str(&self) -> Result<&'a str, str::Utf8Error> {
// Always returns `Ok`: this simply wraps the infallible `as_str()`.
// The `Result` signature is retained only for backward compatibility
// with the pre-0.8.0 byte-based API (see the deprecation note).
Ok(self.as_str())
}

/// The token property represented as a str.
///
/// # Safety
///
/// The underlying bytes are assumed to be UTF-8. If the bytes are
/// not valid UTF-8, then the behavior is undefined.
#[deprecated(since = "0.8.0", note = "Use as_str() instead.")]
#[inline]
#[must_use]
pub const unsafe fn as_str_unchecked(&self) -> &'a str {
// Forwards to the safe `as_str()`; the `unsafe` marker is kept only
// so the signature matches the pre-0.8.0 byte-based API (see the
// deprecation note).
self.as_str()
}

/// Returns the underlying slice.
#[deprecated(since = "0.8.0", note = "Use as_bytes() instead.")]
#[inline]
#[must_use]
pub const fn into_inner(self) -> &'a [u8] {
// Deprecated alias: identical to calling `as_bytes()` directly.
self.as_bytes()
}
}
};
}
Expand Down

0 comments on commit dcacf6a

Please sign in to comment.