Skip to content

Commit

Permalink
Merge branch 'main' of github.com:oxc-project/oxc into don/linter/fea…
Browse files Browse the repository at this point in the history
…t/no-unused-vars-3.2
  • Loading branch information
DonIsaac committed Jul 30, 2024
2 parents 1f29377 + 0914e47 commit 8572261
Show file tree
Hide file tree
Showing 15 changed files with 204 additions and 102 deletions.
3 changes: 2 additions & 1 deletion apps/oxlint/fixtures/typescript_eslint/eslintrc.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
{
"rules": {
"no-loss-of-precision": "off",
"@typescript-eslint/no-loss-of-precision": "error"
"@typescript-eslint/no-loss-of-precision": "error",
"@typescript-eslint/no-namespace": "warn"
}
}
63 changes: 62 additions & 1 deletion crates/oxc_ast/src/ast/literal.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,9 @@ use serde::Serialize;
#[cfg(feature = "serialize")]
use tsify::Tsify;

/// Boolean literal
///
/// <https://tc39.es/ecma262/#prod-BooleanLiteral>
#[ast(visit)]
#[derive(Debug, Clone, Hash)]
#[cfg_attr(feature = "serialize", derive(Serialize, Tsify))]
Expand All @@ -28,6 +31,9 @@ pub struct BooleanLiteral {
pub value: bool,
}

/// Null literal
///
/// <https://tc39.es/ecma262/#sec-null-literals>
#[ast(visit)]
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serialize", derive(Serialize, Tsify))]
Expand All @@ -37,31 +43,43 @@ pub struct NullLiteral {
pub span: Span,
}

/// Numeric literal
///
/// The notation used in the source is tracked in [`NumericLiteral::base`].
///
/// <https://tc39.es/ecma262/#sec-literals-numeric-literals>
#[ast(visit)]
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serialize", derive(Serialize, Tsify))]
#[serde(tag = "type")]
pub struct NumericLiteral<'a> {
    #[serde(flatten)]
    pub span: Span,
    /// The value of the number, converted into base 10
    pub value: f64,
    /// The number as it appears in the source code
    pub raw: &'a str,
    /// The base representation used by the literal in the source code
    ///
    /// Not serialized; only `value` and `raw` appear in the serialized AST.
    #[serde(skip)]
    pub base: NumberBase,
}

/// BigInt literal
///
/// <https://tc39.es/ecma262/#sec-literals-numeric-literals>
#[ast(visit)]
#[derive(Debug, Hash)]
#[cfg_attr(feature = "serialize", derive(Serialize, Tsify))]
#[serde(tag = "type")]
pub struct BigIntLiteral<'a> {
    #[serde(flatten)]
    pub span: Span,
    /// The bigint as it appears in the source code
    // NOTE(review): presumably includes the trailing `n` suffix — confirm against the lexer.
    pub raw: Atom<'a>,
    /// The base representation used by the literal in the source code
    ///
    /// Not serialized; only `raw` appears in the serialized AST.
    #[serde(skip)]
    pub base: BigintBase,
}

/// Regular expression literal
///
/// <https://tc39.es/ecma262/#sec-literals-regular-expression-literals>
#[ast(visit)]
#[derive(Debug, Clone, Hash)]
#[cfg_attr(feature = "serialize", derive(Serialize, Tsify))]
Expand All @@ -75,11 +93,16 @@ pub struct RegExpLiteral<'a> {
pub regex: RegExp<'a>,
}

/// A regular expression
///
/// The pattern is kept as raw source text; the flags are parsed into a
/// [`RegExpFlags`] bit set.
///
/// <https://tc39.es/ecma262/multipage/text-processing.html#sec-regexp-regular-expression-objects>
#[ast]
#[derive(Debug, Clone, Hash)]
#[cfg_attr(feature = "serialize", derive(Serialize, Tsify))]
pub struct RegExp<'a> {
    /// The regex pattern between the slashes
    pub pattern: Atom<'a>,
    /// Regex flags after the closing slash
    pub flags: RegExpFlags,
}

Expand All @@ -88,6 +111,9 @@ pub struct RegExp<'a> {
#[cfg_attr(feature = "serialize", derive(Serialize, Tsify))]
pub struct EmptyObject;

/// String literal
///
/// <https://tc39.es/ecma262/#sec-literals-string-literals>
#[ast(visit)]
#[derive(Debug, Clone, Hash)]
#[cfg_attr(feature = "serialize", derive(Serialize, Tsify))]
Expand All @@ -99,16 +125,43 @@ pub struct StringLiteral<'a> {
}

bitflags! {
    /// Regular expression flags.
    ///
    /// Each flag corresponds to a single-letter suffix on a regex literal;
    /// see the `TryFrom<u8>` / `TryFrom<char>` impls for the letter-to-flag mapping.
    ///
    /// <https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_expressions#advanced_searching_with_flags>
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub struct RegExpFlags: u8 {
        /// Global flag (`g`)
        ///
        /// Causes the pattern to match multiple times.
        const G = 1 << 0;
        /// Ignore case flag (`i`)
        ///
        /// Causes the pattern to ignore case.
        const I = 1 << 1;
        /// Multiline flag (`m`)
        ///
        /// Causes `^` and `$` to match the start/end of each line.
        const M = 1 << 2;
        /// DotAll flag (`s`)
        ///
        /// Causes `.` to also match newlines.
        const S = 1 << 3;
        /// Unicode flag (`u`)
        ///
        /// Causes the pattern to treat the input as a sequence of Unicode code points.
        const U = 1 << 4;
        /// Sticky flag (`y`)
        ///
        /// Perform a "sticky" search that matches starting at the current position in the target string.
        const Y = 1 << 5;
        /// Indices flag (`d`)
        ///
        /// Causes the regular expression to generate indices for substring matches.
        const D = 1 << 6;
        /// Unicode sets flag (`v`)
        ///
        /// Similar to the `u` flag, but also enables the `\p{}` and `\P{}` syntax.
        /// Added by the [`v` flag proposal](https://github.com/tc39/proposal-regexp-set-notation).
        const V = 1 << 7;
    }
}
Expand All @@ -117,13 +170,21 @@ bitflags! {
// Hand-written TypeScript definition for `RegExpFlags`, exposed through the
// wasm-bindgen custom section. The numeric values mirror the `1 << n` bit
// values of the Rust bitflags — keep the two in sync when flags change.
#[wasm_bindgen::prelude::wasm_bindgen(typescript_custom_section)]
const TS_APPEND_CONTENT: &'static str = r#"
export type RegExpFlags = {
/** Global flag */
G: 1,
/** Ignore case flag */
I: 2,
/** Multiline flag */
M: 4,
/** DotAll flag */
S: 8,
/** Unicode flag */
U: 16,
/** Sticky flag */
Y: 32,
/** Indices flag */
D: 64,
/** Unicode sets flag */
V: 128
};
"#;
18 changes: 18 additions & 0 deletions crates/oxc_ast/src/ast_impl/literal.rs
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,24 @@ impl TryFrom<char> for RegExpFlags {
}
}

impl TryFrom<u8> for RegExpFlags {
    type Error = u8;

    /// Convert a single regex flag character, given as an ASCII byte, into
    /// its corresponding flag bit.
    ///
    /// # Errors
    /// Returns the unrecognized byte unchanged when it is not one of the
    /// valid flag characters (`g`, `i`, `m`, `s`, `u`, `y`, `d`, `v`).
    fn try_from(value: u8) -> Result<Self, Self::Error> {
        let flag = match value {
            b'g' => Self::G,
            b'i' => Self::I,
            b'm' => Self::M,
            b's' => Self::S,
            b'u' => Self::U,
            b'y' => Self::Y,
            b'd' => Self::D,
            b'v' => Self::V,
            _ => return Err(value),
        };
        Ok(flag)
    }
}

impl fmt::Display for RegExpFlags {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.contains(Self::G) {
Expand Down
46 changes: 23 additions & 23 deletions crates/oxc_ast/src/generated/ast_builder.rs
Original file line number Diff line number Diff line change
Expand Up @@ -78,9 +78,9 @@ impl<'a> AstBuilder<'a> {
///
/// ## Parameters
/// - span: The [`Span`] covering this node
/// - value
/// - raw
/// - base
/// - value: The value of the number, converted into base 10
/// - raw: The number as it appears in the source code
/// - base: The base representation used by the literal in the source code
#[inline]
pub fn numeric_literal<S>(
self,
Expand All @@ -101,9 +101,9 @@ impl<'a> AstBuilder<'a> {
///
/// ## Parameters
/// - span: The [`Span`] covering this node
/// - value
/// - raw
/// - base
/// - value: The value of the number, converted into base 10
/// - raw: The number as it appears in the source code
/// - base: The base representation used by the literal in the source code
#[inline]
pub fn alloc_numeric_literal<S>(
self,
Expand All @@ -124,8 +124,8 @@ impl<'a> AstBuilder<'a> {
///
/// ## Parameters
/// - span: The [`Span`] covering this node
/// - raw
/// - base
/// - raw: The bigint as it appears in the source code
/// - base: The base representation used by the literal in the source code
#[inline]
pub fn big_int_literal<A>(self, span: Span, raw: A, base: BigintBase) -> BigIntLiteral<'a>
where
Expand All @@ -140,8 +140,8 @@ impl<'a> AstBuilder<'a> {
///
/// ## Parameters
/// - span: The [`Span`] covering this node
/// - raw
/// - base
/// - raw: The bigint as it appears in the source code
/// - base: The base representation used by the literal in the source code
#[inline]
pub fn alloc_big_int_literal<A>(
self,
Expand Down Expand Up @@ -312,9 +312,9 @@ impl<'a> AstBuilder<'a> {
///
/// ## Parameters
/// - span: The [`Span`] covering this node
/// - value
/// - raw
/// - base
/// - value: The value of the number, converted into base 10
/// - raw: The number as it appears in the source code
/// - base: The base representation used by the literal in the source code
#[inline]
pub fn expression_numeric_literal<S>(
self,
Expand Down Expand Up @@ -344,8 +344,8 @@ impl<'a> AstBuilder<'a> {
///
/// ## Parameters
/// - span: The [`Span`] covering this node
/// - raw
/// - base
/// - raw: The bigint as it appears in the source code
/// - base: The base representation used by the literal in the source code
#[inline]
pub fn expression_big_int_literal<A>(
self,
Expand Down Expand Up @@ -8153,9 +8153,9 @@ impl<'a> AstBuilder<'a> {
///
/// ## Parameters
/// - span: The [`Span`] covering this node
/// - value
/// - raw
/// - base
/// - value: The value of the number, converted into base 10
/// - raw: The number as it appears in the source code
/// - base: The base representation used by the literal in the source code
#[inline]
pub fn ts_enum_member_name_numeric_literal<S>(
self,
Expand Down Expand Up @@ -8293,9 +8293,9 @@ impl<'a> AstBuilder<'a> {
///
/// ## Parameters
/// - span: The [`Span`] covering this node
/// - value
/// - raw
/// - base
/// - value: The value of the number, converted into base 10
/// - raw: The number as it appears in the source code
/// - base: The base representation used by the literal in the source code
#[inline]
pub fn ts_literal_numeric_literal<S>(
self,
Expand Down Expand Up @@ -8325,8 +8325,8 @@ impl<'a> AstBuilder<'a> {
///
/// ## Parameters
/// - span: The [`Span`] covering this node
/// - raw
/// - base
/// - raw: The bigint as it appears in the source code
/// - base: The base representation used by the literal in the source code
#[inline]
pub fn ts_literal_big_int_literal<A>(
self,
Expand Down
2 changes: 1 addition & 1 deletion crates/oxc_linter/src/rules/typescript/no_namespace.rs
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ declare_oxc_lint!(
/// declare namespace foo {}
/// ```
NoNamespace,
correctness
restriction
);

impl Rule for NoNamespace {
Expand Down
10 changes: 5 additions & 5 deletions crates/oxc_parser/src/lexer/byte_handlers.rs
Original file line number Diff line number Diff line change
Expand Up @@ -336,12 +336,12 @@ ascii_byte_handler!(PRD(lexer) {
// /
ascii_byte_handler!(SLH(lexer) {
lexer.consume_char();
match lexer.peek() {
Some('/') => {
match lexer.peek_byte() {
Some(b'/') => {
lexer.consume_char();
lexer.skip_single_line_comment()
}
Some('*') => {
Some(b'*') => {
lexer.consume_char();
lexer.skip_multi_line_comment()
}
Expand Down Expand Up @@ -418,9 +418,9 @@ ascii_byte_handler!(QST(lexer) {
} else {
Kind::Question2
}
} else if lexer.peek() == Some('.') {
} else if lexer.peek_byte() == Some(b'.') {
// parse `?.1` as `?` `.1`
if lexer.peek2().is_some_and(|c| c.is_ascii_digit()) {
if lexer.peek_char2().is_some_and(|c| c.is_ascii_digit()) {
Kind::Question
} else {
lexer.consume_char();
Expand Down
8 changes: 4 additions & 4 deletions crates/oxc_parser/src/lexer/identifier.rs
Original file line number Diff line number Diff line change
Expand Up @@ -98,7 +98,7 @@ impl<'a> Lexer<'a> {
/// Any number of characters can have already been consumed from `self.source` prior to it.
/// `self.source` should be positioned at start of Unicode character.
fn identifier_tail_unicode(&mut self, start_pos: SourcePosition) -> &'a str {
let c = self.peek().unwrap();
let c = self.peek_char().unwrap();
if is_identifier_part_unicode(c) {
self.consume_char();
self.identifier_tail_after_unicode(start_pos)
Expand All @@ -115,7 +115,7 @@ impl<'a> Lexer<'a> {
pub(super) fn identifier_tail_after_unicode(&mut self, start_pos: SourcePosition) -> &'a str {
// Identifier contains a Unicode chars, so probably contains more.
// So just iterate over chars now, instead of bytes.
while let Some(c) = self.peek() {
while let Some(c) = self.peek_char() {
if is_identifier_part(c) {
self.consume_char();
} else if c == '\\' {
Expand Down Expand Up @@ -177,7 +177,7 @@ impl<'a> Lexer<'a> {
// Consume chars until reach end of identifier or another escape
let chunk_start = self.source.position();
loop {
let maybe_char = self.peek();
let maybe_char = self.peek_char();
if maybe_char.is_some_and(is_identifier_part) {
self.consume_char();
continue;
Expand Down Expand Up @@ -272,7 +272,7 @@ impl<'a> Lexer<'a> {
fn private_identifier_not_ascii_id(&mut self) -> Kind {
let b = self.source.peek_byte().unwrap();
if !b.is_ascii() {
let c = self.peek().unwrap();
let c = self.peek_char().unwrap();
if is_identifier_start_unicode(c) {
let start_pos = self.source.position();
self.consume_char();
Expand Down
8 changes: 4 additions & 4 deletions crates/oxc_parser/src/lexer/jsx.rs
Original file line number Diff line number Diff line change
Expand Up @@ -61,12 +61,12 @@ impl<'a> Lexer<'a> {
/// `JSXFragment`
/// { `JSXChildExpressionopt` }
fn read_jsx_child(&mut self) -> Kind {
match self.peek() {
Some('<') => {
match self.peek_byte() {
Some(b'<') => {
self.consume_char();
Kind::LAngle
}
Some('{') => {
Some(b'{') => {
self.consume_char();
Kind::LCurly
}
Expand Down Expand Up @@ -122,7 +122,7 @@ impl<'a> Lexer<'a> {
// Unicode chars are rare in identifiers, so cold branch to keep common path for ASCII
// as fast as possible
cold_branch(|| {
while let Some(c) = self.peek() {
while let Some(c) = self.peek_char() {
if c == '-' || is_identifier_part(c) {
self.consume_char();
} else {
Expand Down
Loading

0 comments on commit 8572261

Please sign in to comment.