diff --git a/src/tokens.rs b/src/tokens.rs
index 7fc58ff..340a909 100644
--- a/src/tokens.rs
+++ b/src/tokens.rs
@@ -172,7 +172,11 @@ impl Token {
     }
     pub fn is_unary_op(self) -> bool {
         match self {
-            Token::Plus | Token::Minus | Token::Star | Token::Ampersand | Token::Bang => true,
+            Token::Plus
+            | Token::Minus
+            | Token::Star
+            | Token::Ampersand
+            | Token::Bang => true,
             _ => false,
         }
     }
@@ -264,15 +268,16 @@ impl LexemeParser {
             // them is represented by a single byte and we know they must be
             // utf8/ascii.
             unsafe {
-                char::from_u32_unchecked(tok.lexeme().unwrap().as_bytes()[self.len - 1] as u32)
-                    == ch
+                char::from_u32_unchecked(
+                    tok.lexeme().unwrap().as_bytes()[self.len - 1] as u32,
+                ) == ch
             }
         });
 
         // A token has been successfully matched completely if it has not yet
         // been removed from the lexeme list but the length of it's lexeme is no
         // greater than the number of chars we've received.
-        self.candidates.extend(self.lexemes.extract_if(|tok| {
+        self.candidates.extend(self.lexemes.extract_if(.., |tok| {
             // SAFETY: as above, all of the tokens in self.lexemes are
             // lexical and are all single byte characters.
             tok.lexeme().unwrap().as_bytes().len() <= self.len
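
For context on the second hunk: the call site is updated for the stabilized `Vec::extract_if` (Rust 1.87+), which now takes a range as its first argument; passing `..` keeps the previous "scan the whole vector" behaviour. The sketch below is a minimal, standalone illustration of that signature using placeholder string lexemes, assuming `self.lexemes` is a `Vec`; it is not the project's actual types.

```rust
fn main() {
    // Stand-in for self.lexemes: a mix of single- and multi-byte lexemes.
    let mut lexemes = vec!["+", "-", "&&", "**", "!"];

    // extract_if(range, filter): removes and yields every element in the
    // range for which the closure returns true. `..` covers the whole Vec.
    let candidates: Vec<&str> = lexemes.extract_if(.., |tok| tok.len() == 1).collect();

    assert_eq!(candidates, ["+", "-", "!"]);
    assert_eq!(lexemes, ["&&", "**"]); // non-matching elements are retained
}
```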