From 827b1280e4ea0e6c4cd582432ede28967eb6d5cb Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 12 Nov 2024 11:38:44 +0100 Subject: [PATCH 01/83] add for_each_simd_operator generator macro --- crates/wasmparser/src/lib.rs | 272 +++++++++++++++++++++++++++++++++++ 1 file changed, 272 insertions(+) diff --git a/crates/wasmparser/src/lib.rs b/crates/wasmparser/src/lib.rs index 0fa70dea2d..6cd1140df9 100644 --- a/crates/wasmparser/src/lib.rs +++ b/crates/wasmparser/src/lib.rs @@ -885,6 +885,278 @@ macro_rules! for_each_operator { }; } +/// Docs: TODO +#[macro_export] +macro_rules! for_each_simd_operator { + ($mac:ident) => { + $mac! { + // 0xFD operators + // 128-bit SIMD + // - https://github.com/webassembly/simd + // - https://webassembly.github.io/simd/core/binary/instructions.html + @simd V128Load { memarg: $crate::MemArg } => visit_v128_load (load v128) + @simd V128Load8x8S { memarg: $crate::MemArg } => visit_v128_load8x8_s (load v128) + @simd V128Load8x8U { memarg: $crate::MemArg } => visit_v128_load8x8_u (load v128) + @simd V128Load16x4S { memarg: $crate::MemArg } => visit_v128_load16x4_s (load v128) + @simd V128Load16x4U { memarg: $crate::MemArg } => visit_v128_load16x4_u (load v128) + @simd V128Load32x2S { memarg: $crate::MemArg } => visit_v128_load32x2_s (load v128) + @simd V128Load32x2U { memarg: $crate::MemArg } => visit_v128_load32x2_u (load v128) + @simd V128Load8Splat { memarg: $crate::MemArg } => visit_v128_load8_splat (load v128) + @simd V128Load16Splat { memarg: $crate::MemArg } => visit_v128_load16_splat (load v128) + @simd V128Load32Splat { memarg: $crate::MemArg } => visit_v128_load32_splat (load v128) + @simd V128Load64Splat { memarg: $crate::MemArg } => visit_v128_load64_splat (load v128) + @simd V128Load32Zero { memarg: $crate::MemArg } => visit_v128_load32_zero (load v128) + @simd V128Load64Zero { memarg: $crate::MemArg } => visit_v128_load64_zero (load v128) + @simd V128Store { memarg: $crate::MemArg } => visit_v128_store (store v128) + 
@simd V128Load8Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load8_lane (load lane 16) + @simd V128Load16Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load16_lane (load lane 8) + @simd V128Load32Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load32_lane (load lane 4) + @simd V128Load64Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load64_lane (load lane 2) + @simd V128Store8Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_store8_lane (store lane 16) + @simd V128Store16Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_store16_lane (store lane 8) + @simd V128Store32Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_store32_lane (store lane 4) + @simd V128Store64Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_store64_lane (store lane 2) + @simd V128Const { value: $crate::V128 } => visit_v128_const (push v128) + @simd I8x16Shuffle { lanes: [u8; 16] } => visit_i8x16_shuffle (arity 2 -> 1) + @simd I8x16ExtractLaneS { lane: u8 } => visit_i8x16_extract_lane_s (extract i32 16) + @simd I8x16ExtractLaneU { lane: u8 } => visit_i8x16_extract_lane_u (extract i32 16) + @simd I8x16ReplaceLane { lane: u8 } => visit_i8x16_replace_lane (replace i32 16) + @simd I16x8ExtractLaneS { lane: u8 } => visit_i16x8_extract_lane_s (extract i32 8) + @simd I16x8ExtractLaneU { lane: u8 } => visit_i16x8_extract_lane_u (extract i32 8) + @simd I16x8ReplaceLane { lane: u8 } => visit_i16x8_replace_lane (replace i32 8) + @simd I32x4ExtractLane { lane: u8 } => visit_i32x4_extract_lane (extract i32 4) + @simd I32x4ReplaceLane { lane: u8 } => visit_i32x4_replace_lane (replace i32 4) + @simd I64x2ExtractLane { lane: u8 } => visit_i64x2_extract_lane (extract i64 2) + @simd I64x2ReplaceLane { lane: u8 } => visit_i64x2_replace_lane (replace i64 2) + @simd F32x4ExtractLane { lane: u8 } => visit_f32x4_extract_lane (extract f32 4) + @simd F32x4ReplaceLane { lane: u8 } => visit_f32x4_replace_lane (replace f32 4) + @simd F64x2ExtractLane { lane: u8 } 
=> visit_f64x2_extract_lane (extract f64 2) + @simd F64x2ReplaceLane { lane: u8 } => visit_f64x2_replace_lane (replace f64 2) + @simd I8x16Swizzle => visit_i8x16_swizzle (binary v128) + @simd I8x16Splat => visit_i8x16_splat (splat i32) + @simd I16x8Splat => visit_i16x8_splat (splat i32) + @simd I32x4Splat => visit_i32x4_splat (splat i32) + @simd I64x2Splat => visit_i64x2_splat (splat i64) + @simd F32x4Splat => visit_f32x4_splat (splat f32) + @simd F64x2Splat => visit_f64x2_splat (splat f64) + @simd I8x16Eq => visit_i8x16_eq (binary v128) + @simd I8x16Ne => visit_i8x16_ne (binary v128) + @simd I8x16LtS => visit_i8x16_lt_s (binary v128) + @simd I8x16LtU => visit_i8x16_lt_u (binary v128) + @simd I8x16GtS => visit_i8x16_gt_s (binary v128) + @simd I8x16GtU => visit_i8x16_gt_u (binary v128) + @simd I8x16LeS => visit_i8x16_le_s (binary v128) + @simd I8x16LeU => visit_i8x16_le_u (binary v128) + @simd I8x16GeS => visit_i8x16_ge_s (binary v128) + @simd I8x16GeU => visit_i8x16_ge_u (binary v128) + @simd I16x8Eq => visit_i16x8_eq (binary v128) + @simd I16x8Ne => visit_i16x8_ne (binary v128) + @simd I16x8LtS => visit_i16x8_lt_s (binary v128) + @simd I16x8LtU => visit_i16x8_lt_u (binary v128) + @simd I16x8GtS => visit_i16x8_gt_s (binary v128) + @simd I16x8GtU => visit_i16x8_gt_u (binary v128) + @simd I16x8LeS => visit_i16x8_le_s (binary v128) + @simd I16x8LeU => visit_i16x8_le_u (binary v128) + @simd I16x8GeS => visit_i16x8_ge_s (binary v128) + @simd I16x8GeU => visit_i16x8_ge_u (binary v128) + @simd I32x4Eq => visit_i32x4_eq (binary v128) + @simd I32x4Ne => visit_i32x4_ne (binary v128) + @simd I32x4LtS => visit_i32x4_lt_s (binary v128) + @simd I32x4LtU => visit_i32x4_lt_u (binary v128) + @simd I32x4GtS => visit_i32x4_gt_s (binary v128) + @simd I32x4GtU => visit_i32x4_gt_u (binary v128) + @simd I32x4LeS => visit_i32x4_le_s (binary v128) + @simd I32x4LeU => visit_i32x4_le_u (binary v128) + @simd I32x4GeS => visit_i32x4_ge_s (binary v128) + @simd I32x4GeU => visit_i32x4_ge_u 
(binary v128) + @simd I64x2Eq => visit_i64x2_eq (binary v128) + @simd I64x2Ne => visit_i64x2_ne (binary v128) + @simd I64x2LtS => visit_i64x2_lt_s (binary v128) + @simd I64x2GtS => visit_i64x2_gt_s (binary v128) + @simd I64x2LeS => visit_i64x2_le_s (binary v128) + @simd I64x2GeS => visit_i64x2_ge_s (binary v128) + @simd F32x4Eq => visit_f32x4_eq (binary v128f) + @simd F32x4Ne => visit_f32x4_ne (binary v128f) + @simd F32x4Lt => visit_f32x4_lt (binary v128f) + @simd F32x4Gt => visit_f32x4_gt (binary v128f) + @simd F32x4Le => visit_f32x4_le (binary v128f) + @simd F32x4Ge => visit_f32x4_ge (binary v128f) + @simd F64x2Eq => visit_f64x2_eq (binary v128f) + @simd F64x2Ne => visit_f64x2_ne (binary v128f) + @simd F64x2Lt => visit_f64x2_lt (binary v128f) + @simd F64x2Gt => visit_f64x2_gt (binary v128f) + @simd F64x2Le => visit_f64x2_le (binary v128f) + @simd F64x2Ge => visit_f64x2_ge (binary v128f) + @simd V128Not => visit_v128_not (unary v128) + @simd V128And => visit_v128_and (binary v128) + @simd V128AndNot => visit_v128_andnot (binary v128) + @simd V128Or => visit_v128_or (binary v128) + @simd V128Xor => visit_v128_xor (binary v128) + @simd V128Bitselect => visit_v128_bitselect (ternary v128) + @simd V128AnyTrue => visit_v128_any_true (test v128) + @simd I8x16Abs => visit_i8x16_abs (unary v128) + @simd I8x16Neg => visit_i8x16_neg (unary v128) + @simd I8x16Popcnt => visit_i8x16_popcnt (unary v128) + @simd I8x16AllTrue => visit_i8x16_all_true (test v128) + @simd I8x16Bitmask => visit_i8x16_bitmask (test v128) + @simd I8x16NarrowI16x8S => visit_i8x16_narrow_i16x8_s (binary v128) + @simd I8x16NarrowI16x8U => visit_i8x16_narrow_i16x8_u (binary v128) + @simd I8x16Shl => visit_i8x16_shl (shift v128) + @simd I8x16ShrS => visit_i8x16_shr_s (shift v128) + @simd I8x16ShrU => visit_i8x16_shr_u (shift v128) + @simd I8x16Add => visit_i8x16_add (binary v128) + @simd I8x16AddSatS => visit_i8x16_add_sat_s (binary v128) + @simd I8x16AddSatU => visit_i8x16_add_sat_u (binary v128) + @simd 
I8x16Sub => visit_i8x16_sub (binary v128) + @simd I8x16SubSatS => visit_i8x16_sub_sat_s (binary v128) + @simd I8x16SubSatU => visit_i8x16_sub_sat_u (binary v128) + @simd I8x16MinS => visit_i8x16_min_s (binary v128) + @simd I8x16MinU => visit_i8x16_min_u (binary v128) + @simd I8x16MaxS => visit_i8x16_max_s (binary v128) + @simd I8x16MaxU => visit_i8x16_max_u (binary v128) + @simd I8x16AvgrU => visit_i8x16_avgr_u (binary v128) + @simd I16x8ExtAddPairwiseI8x16S => visit_i16x8_extadd_pairwise_i8x16_s (unary v128) + @simd I16x8ExtAddPairwiseI8x16U => visit_i16x8_extadd_pairwise_i8x16_u (unary v128) + @simd I16x8Abs => visit_i16x8_abs (unary v128) + @simd I16x8Neg => visit_i16x8_neg (unary v128) + @simd I16x8Q15MulrSatS => visit_i16x8_q15mulr_sat_s (binary v128) + @simd I16x8AllTrue => visit_i16x8_all_true (test v128) + @simd I16x8Bitmask => visit_i16x8_bitmask (test v128) + @simd I16x8NarrowI32x4S => visit_i16x8_narrow_i32x4_s (binary v128) + @simd I16x8NarrowI32x4U => visit_i16x8_narrow_i32x4_u (binary v128) + @simd I16x8ExtendLowI8x16S => visit_i16x8_extend_low_i8x16_s (unary v128) + @simd I16x8ExtendHighI8x16S => visit_i16x8_extend_high_i8x16_s (unary v128) + @simd I16x8ExtendLowI8x16U => visit_i16x8_extend_low_i8x16_u (unary v128) + @simd I16x8ExtendHighI8x16U => visit_i16x8_extend_high_i8x16_u (unary v128) + @simd I16x8Shl => visit_i16x8_shl (shift v128) + @simd I16x8ShrS => visit_i16x8_shr_s (shift v128) + @simd I16x8ShrU => visit_i16x8_shr_u (shift v128) + @simd I16x8Add => visit_i16x8_add (binary v128) + @simd I16x8AddSatS => visit_i16x8_add_sat_s (binary v128) + @simd I16x8AddSatU => visit_i16x8_add_sat_u (binary v128) + @simd I16x8Sub => visit_i16x8_sub (binary v128) + @simd I16x8SubSatS => visit_i16x8_sub_sat_s (binary v128) + @simd I16x8SubSatU => visit_i16x8_sub_sat_u (binary v128) + @simd I16x8Mul => visit_i16x8_mul (binary v128) + @simd I16x8MinS => visit_i16x8_min_s (binary v128) + @simd I16x8MinU => visit_i16x8_min_u (binary v128) + @simd I16x8MaxS => 
visit_i16x8_max_s (binary v128) + @simd I16x8MaxU => visit_i16x8_max_u (binary v128) + @simd I16x8AvgrU => visit_i16x8_avgr_u (binary v128) + @simd I16x8ExtMulLowI8x16S => visit_i16x8_extmul_low_i8x16_s (binary v128) + @simd I16x8ExtMulHighI8x16S => visit_i16x8_extmul_high_i8x16_s (binary v128) + @simd I16x8ExtMulLowI8x16U => visit_i16x8_extmul_low_i8x16_u (binary v128) + @simd I16x8ExtMulHighI8x16U => visit_i16x8_extmul_high_i8x16_u (binary v128) + @simd I32x4ExtAddPairwiseI16x8S => visit_i32x4_extadd_pairwise_i16x8_s (unary v128) + @simd I32x4ExtAddPairwiseI16x8U => visit_i32x4_extadd_pairwise_i16x8_u (unary v128) + @simd I32x4Abs => visit_i32x4_abs (unary v128) + @simd I32x4Neg => visit_i32x4_neg (unary v128) + @simd I32x4AllTrue => visit_i32x4_all_true (test v128) + @simd I32x4Bitmask => visit_i32x4_bitmask (test v128) + @simd I32x4ExtendLowI16x8S => visit_i32x4_extend_low_i16x8_s (unary v128) + @simd I32x4ExtendHighI16x8S => visit_i32x4_extend_high_i16x8_s (unary v128) + @simd I32x4ExtendLowI16x8U => visit_i32x4_extend_low_i16x8_u (unary v128) + @simd I32x4ExtendHighI16x8U => visit_i32x4_extend_high_i16x8_u (unary v128) + @simd I32x4Shl => visit_i32x4_shl (shift v128) + @simd I32x4ShrS => visit_i32x4_shr_s (shift v128) + @simd I32x4ShrU => visit_i32x4_shr_u (shift v128) + @simd I32x4Add => visit_i32x4_add (binary v128) + @simd I32x4Sub => visit_i32x4_sub (binary v128) + @simd I32x4Mul => visit_i32x4_mul (binary v128) + @simd I32x4MinS => visit_i32x4_min_s (binary v128) + @simd I32x4MinU => visit_i32x4_min_u (binary v128) + @simd I32x4MaxS => visit_i32x4_max_s (binary v128) + @simd I32x4MaxU => visit_i32x4_max_u (binary v128) + @simd I32x4DotI16x8S => visit_i32x4_dot_i16x8_s (binary v128) + @simd I32x4ExtMulLowI16x8S => visit_i32x4_extmul_low_i16x8_s (binary v128) + @simd I32x4ExtMulHighI16x8S => visit_i32x4_extmul_high_i16x8_s (binary v128) + @simd I32x4ExtMulLowI16x8U => visit_i32x4_extmul_low_i16x8_u (binary v128) + @simd I32x4ExtMulHighI16x8U => 
visit_i32x4_extmul_high_i16x8_u (binary v128) + @simd I64x2Abs => visit_i64x2_abs (unary v128) + @simd I64x2Neg => visit_i64x2_neg (unary v128) + @simd I64x2AllTrue => visit_i64x2_all_true (test v128) + @simd I64x2Bitmask => visit_i64x2_bitmask (test v128) + @simd I64x2ExtendLowI32x4S => visit_i64x2_extend_low_i32x4_s (unary v128) + @simd I64x2ExtendHighI32x4S => visit_i64x2_extend_high_i32x4_s (unary v128) + @simd I64x2ExtendLowI32x4U => visit_i64x2_extend_low_i32x4_u (unary v128) + @simd I64x2ExtendHighI32x4U => visit_i64x2_extend_high_i32x4_u (unary v128) + @simd I64x2Shl => visit_i64x2_shl (shift v128) + @simd I64x2ShrS => visit_i64x2_shr_s (shift v128) + @simd I64x2ShrU => visit_i64x2_shr_u (shift v128) + @simd I64x2Add => visit_i64x2_add (binary v128) + @simd I64x2Sub => visit_i64x2_sub (binary v128) + @simd I64x2Mul => visit_i64x2_mul (binary v128) + @simd I64x2ExtMulLowI32x4S => visit_i64x2_extmul_low_i32x4_s (binary v128) + @simd I64x2ExtMulHighI32x4S => visit_i64x2_extmul_high_i32x4_s (binary v128) + @simd I64x2ExtMulLowI32x4U => visit_i64x2_extmul_low_i32x4_u (binary v128) + @simd I64x2ExtMulHighI32x4U => visit_i64x2_extmul_high_i32x4_u (binary v128) + @simd F32x4Ceil => visit_f32x4_ceil (unary v128f) + @simd F32x4Floor => visit_f32x4_floor (unary v128f) + @simd F32x4Trunc => visit_f32x4_trunc (unary v128f) + @simd F32x4Nearest => visit_f32x4_nearest (unary v128f) + @simd F32x4Abs => visit_f32x4_abs (unary v128f) + @simd F32x4Neg => visit_f32x4_neg (unary v128f) + @simd F32x4Sqrt => visit_f32x4_sqrt (unary v128f) + @simd F32x4Add => visit_f32x4_add (binary v128f) + @simd F32x4Sub => visit_f32x4_sub (binary v128f) + @simd F32x4Mul => visit_f32x4_mul (binary v128f) + @simd F32x4Div => visit_f32x4_div (binary v128f) + @simd F32x4Min => visit_f32x4_min (binary v128f) + @simd F32x4Max => visit_f32x4_max (binary v128f) + @simd F32x4PMin => visit_f32x4_pmin (binary v128f) + @simd F32x4PMax => visit_f32x4_pmax (binary v128f) + @simd F64x2Ceil => visit_f64x2_ceil 
(unary v128f) + @simd F64x2Floor => visit_f64x2_floor (unary v128f) + @simd F64x2Trunc => visit_f64x2_trunc (unary v128f) + @simd F64x2Nearest => visit_f64x2_nearest (unary v128f) + @simd F64x2Abs => visit_f64x2_abs (unary v128f) + @simd F64x2Neg => visit_f64x2_neg (unary v128f) + @simd F64x2Sqrt => visit_f64x2_sqrt (unary v128f) + @simd F64x2Add => visit_f64x2_add (binary v128f) + @simd F64x2Sub => visit_f64x2_sub (binary v128f) + @simd F64x2Mul => visit_f64x2_mul (binary v128f) + @simd F64x2Div => visit_f64x2_div (binary v128f) + @simd F64x2Min => visit_f64x2_min (binary v128f) + @simd F64x2Max => visit_f64x2_max (binary v128f) + @simd F64x2PMin => visit_f64x2_pmin (binary v128f) + @simd F64x2PMax => visit_f64x2_pmax (binary v128f) + @simd I32x4TruncSatF32x4S => visit_i32x4_trunc_sat_f32x4_s (unary v128f) + @simd I32x4TruncSatF32x4U => visit_i32x4_trunc_sat_f32x4_u (unary v128f) + @simd F32x4ConvertI32x4S => visit_f32x4_convert_i32x4_s (unary v128f) + @simd F32x4ConvertI32x4U => visit_f32x4_convert_i32x4_u (unary v128f) + @simd I32x4TruncSatF64x2SZero => visit_i32x4_trunc_sat_f64x2_s_zero (unary v128f) + @simd I32x4TruncSatF64x2UZero => visit_i32x4_trunc_sat_f64x2_u_zero (unary v128f) + @simd F64x2ConvertLowI32x4S => visit_f64x2_convert_low_i32x4_s (unary v128f) + @simd F64x2ConvertLowI32x4U => visit_f64x2_convert_low_i32x4_u (unary v128f) + @simd F32x4DemoteF64x2Zero => visit_f32x4_demote_f64x2_zero (unary v128f) + @simd F64x2PromoteLowF32x4 => visit_f64x2_promote_low_f32x4 (unary v128f) + + // Relaxed SIMD operators + // https://github.com/WebAssembly/relaxed-simd + @relaxed_simd I8x16RelaxedSwizzle => visit_i8x16_relaxed_swizzle (binary v128) + @relaxed_simd I32x4RelaxedTruncF32x4S => visit_i32x4_relaxed_trunc_f32x4_s (unary v128) + @relaxed_simd I32x4RelaxedTruncF32x4U => visit_i32x4_relaxed_trunc_f32x4_u (unary v128) + @relaxed_simd I32x4RelaxedTruncF64x2SZero => visit_i32x4_relaxed_trunc_f64x2_s_zero (unary v128) + @relaxed_simd I32x4RelaxedTruncF64x2UZero 
=> visit_i32x4_relaxed_trunc_f64x2_u_zero (unary v128) + @relaxed_simd F32x4RelaxedMadd => visit_f32x4_relaxed_madd (ternary v128) + @relaxed_simd F32x4RelaxedNmadd => visit_f32x4_relaxed_nmadd (ternary v128) + @relaxed_simd F64x2RelaxedMadd => visit_f64x2_relaxed_madd (ternary v128) + @relaxed_simd F64x2RelaxedNmadd => visit_f64x2_relaxed_nmadd (ternary v128) + @relaxed_simd I8x16RelaxedLaneselect => visit_i8x16_relaxed_laneselect (ternary v128) + @relaxed_simd I16x8RelaxedLaneselect => visit_i16x8_relaxed_laneselect (ternary v128) + @relaxed_simd I32x4RelaxedLaneselect => visit_i32x4_relaxed_laneselect (ternary v128) + @relaxed_simd I64x2RelaxedLaneselect => visit_i64x2_relaxed_laneselect (ternary v128) + @relaxed_simd F32x4RelaxedMin => visit_f32x4_relaxed_min (binary v128) + @relaxed_simd F32x4RelaxedMax => visit_f32x4_relaxed_max (binary v128) + @relaxed_simd F64x2RelaxedMin => visit_f64x2_relaxed_min (binary v128) + @relaxed_simd F64x2RelaxedMax => visit_f64x2_relaxed_max (binary v128) + @relaxed_simd I16x8RelaxedQ15mulrS => visit_i16x8_relaxed_q15mulr_s (binary v128) + @relaxed_simd I16x8RelaxedDotI8x16I7x16S => visit_i16x8_relaxed_dot_i8x16_i7x16_s (binary v128) + @relaxed_simd I32x4RelaxedDotI8x16I7x16AddS => visit_i32x4_relaxed_dot_i8x16_i7x16_add_s (ternary v128) + } + }; +} + macro_rules! 
format_err { ($offset:expr, $($arg:tt)*) => { crate::BinaryReaderError::fmt(format_args!($($arg)*), $offset) From 5944fb999969d38b6aca5e7264edd3e74d9b8805 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 12 Nov 2024 11:39:09 +0100 Subject: [PATCH 02/83] define SimdOperator enum --- crates/wasmparser/src/readers/core/operators.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/crates/wasmparser/src/readers/core/operators.rs b/crates/wasmparser/src/readers/core/operators.rs index 2cdf21124a..1a82e7b87f 100644 --- a/crates/wasmparser/src/readers/core/operators.rs +++ b/crates/wasmparser/src/readers/core/operators.rs @@ -229,6 +229,22 @@ macro_rules! define_operator { } for_each_operator!(define_operator); +macro_rules! define_simd_operator { + ($(@$proposal:ident $op:ident $({ $($payload:tt)* })? => $visit:ident ($($ann:tt)*))*) => { + /// SIMD instructions as defined [here]. + /// + /// [here]: https://webassembly.github.io/spec/core/binary/instructions.html + #[derive(Debug, Clone, Eq, PartialEq)] + #[allow(missing_docs)] + pub enum SimdOperator { + $( + $op $({ $($payload)* })?, + )* + } + } +} +for_each_simd_operator!(define_simd_operator); + /// A reader for a core WebAssembly function's operators. 
#[derive(Clone)] pub struct OperatorsReader<'a> { From d3e99f052d7c8d06ad3a47933b7e604bab4c2bfc Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 12 Nov 2024 11:46:51 +0100 Subject: [PATCH 03/83] add SimdOperator::operator_arity impl --- crates/wasmparser/src/arity.rs | 29 +++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/crates/wasmparser/src/arity.rs b/crates/wasmparser/src/arity.rs index b98d0054a8..53bd5262d6 100644 --- a/crates/wasmparser/src/arity.rs +++ b/crates/wasmparser/src/arity.rs @@ -14,8 +14,7 @@ */ use crate::{ - BinaryReader, BinaryReaderError, BlockType, CompositeInnerType, ContType, FrameKind, FuncType, - Operator, RefType, Result, SubType, + BinaryReader, BinaryReaderError, BlockType, CompositeInnerType, ContType, FrameKind, FuncType, Operator, RefType, Result, SimdOperator, SubType }; /// To compute the arity (param and result counts) of "variable-arity" @@ -240,10 +239,10 @@ impl Operator<'_> { /// an impl ModuleArity, which stores the necessary module state. pub fn operator_arity(&self, module: &impl ModuleArity) -> Option<(u32, u32)> { macro_rules! define_arity { - ($(@$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) )*) => ( + ( $(@$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) )*) => ( match self.clone() { $( - Operator::$op $({ $($arg),* })? => { + Self::$op $({ $($arg),* })? => { $( $(let _ = $arg;)* )? @@ -256,3 +255,25 @@ impl Operator<'_> { for_each_operator!(define_arity) } } + +impl SimdOperator { + /// Compute the arity (param and result counts) of the operator, given + /// an impl ModuleArity, which stores the necessary module state. + pub fn operator_arity(&self) -> Option<(u32, u32)> { + macro_rules! define_arity { + ( $(@$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) )*) => ( + match self.clone() { + $( + Self::$op $({ $($arg),* })? 
=> { + $( + $(let _ = $arg;)* + )? + operator_arity!(arity module $({ $($arg: $argty),* })? $($ann)*) + } + )* + } + ); + } + for_each_simd_operator!(define_arity) + } +} From 9698f979b497452e1d3f3516822ff9feaa2261f8 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 12 Nov 2024 11:54:48 +0100 Subject: [PATCH 04/83] add VisitSimdOperator trait definition --- .../wasmparser/src/readers/core/operators.rs | 31 ++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/crates/wasmparser/src/readers/core/operators.rs b/crates/wasmparser/src/readers/core/operators.rs index 1a82e7b87f..5a435fe9cf 100644 --- a/crates/wasmparser/src/readers/core/operators.rs +++ b/crates/wasmparser/src/readers/core/operators.rs @@ -441,7 +441,6 @@ pub trait VisitOperator<'a> { )* } } - } for_each_operator!(visit_operator) } @@ -449,6 +448,36 @@ pub trait VisitOperator<'a> { for_each_operator!(define_visit_operator); } +/// Trait implemented by types that can visit all [`Operator`] variants. +#[allow(missing_docs)] +pub trait VisitSimdOperator { + /// The result type of the visitor. + type Output; + + /// Visits the SIMD [`Operator`] `op` using the given `offset`. + /// + /// # Note + /// + /// This is a convenience method that is intended for non-performance + /// critical use cases. For performance critical implementations users + /// are recommended to directly use the respective `visit` methods or + /// implement [`VisitOperator`] on their own. + fn visit_simd_operator(&mut self, op: &SimdOperator) -> Self::Output { + macro_rules! visit_simd_operator { + ($(@$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { + match op { + $( + SimdOperator::$op $({ $($arg),* })? => self.$visit($($($arg.clone()),*)?), + )* + } + } + } + for_each_simd_operator!(visit_simd_operator) + } + + for_each_simd_operator!(define_visit_operator); +} + macro_rules! 
define_visit_operator_delegate { ($(@$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { $( From 76557eb745566d34d1ad3778a3011f9a329fe8d1 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 12 Nov 2024 11:58:15 +0100 Subject: [PATCH 05/83] define VisitSimdOperator delegates --- crates/wasmparser/src/readers/core/operators.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/crates/wasmparser/src/readers/core/operators.rs b/crates/wasmparser/src/readers/core/operators.rs index 5a435fe9cf..7c1fa1683a 100644 --- a/crates/wasmparser/src/readers/core/operators.rs +++ b/crates/wasmparser/src/readers/core/operators.rs @@ -496,6 +496,14 @@ impl<'a, 'b, V: VisitOperator<'a> + ?Sized> VisitOperator<'a> for &'b mut V { for_each_operator!(define_visit_operator_delegate); } +impl<'a, V: VisitSimdOperator + ?Sized> VisitSimdOperator for &'a mut V { + type Output = V::Output; + fn visit_simd_operator(&mut self, op: &SimdOperator) -> Self::Output { + V::visit_simd_operator(*self, op) + } + for_each_simd_operator!(define_visit_operator_delegate); +} + impl<'a, V: VisitOperator<'a> + ?Sized> VisitOperator<'a> for Box { type Output = V::Output; fn visit_operator(&mut self, op: &Operator<'a>) -> Self::Output { @@ -504,6 +512,14 @@ impl<'a, V: VisitOperator<'a> + ?Sized> VisitOperator<'a> for Box { for_each_operator!(define_visit_operator_delegate); } +impl VisitSimdOperator for Box { + type Output = V::Output; + fn visit_simd_operator(&mut self, op: &SimdOperator) -> Self::Output { + V::visit_simd_operator(&mut *self, op) + } + for_each_simd_operator!(define_visit_operator_delegate); +} + /// A `try_table` entries representation. 
#[derive(Clone, Debug, Eq, PartialEq)] pub struct TryTable { From 6adace160b2b49be40fe94d8e4db3567c78ea39a Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 12 Nov 2024 12:34:13 +0100 Subject: [PATCH 06/83] add benchmark NopVisit impl --- crates/wasmparser/benches/benchmark.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/crates/wasmparser/benches/benchmark.rs b/crates/wasmparser/benches/benchmark.rs index e9149e189a..c5eb773746 100644 --- a/crates/wasmparser/benches/benchmark.rs +++ b/crates/wasmparser/benches/benchmark.rs @@ -1,6 +1,7 @@ use anyhow::Result; use criterion::{criterion_group, criterion_main, Criterion}; use once_cell::unsync::Lazy; +use wasmparser::VisitSimdOperator; use std::fs; use std::path::Path; use std::path::PathBuf; @@ -366,3 +367,10 @@ impl<'a> VisitOperator<'a> for NopVisit { wasmparser::for_each_operator!(define_visit_operator); } + +#[allow(unused_variables)] +impl<'a> VisitSimdOperator for NopVisit { + type Output = (); + + wasmparser::for_each_simd_operator!(define_visit_operator); +} From 2fa11f2258451e5b5ce1bd196380e82344ef1348 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 12 Nov 2024 12:45:15 +0100 Subject: [PATCH 07/83] add simd crate feature --- crates/wasmparser/Cargo.toml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/wasmparser/Cargo.toml b/crates/wasmparser/Cargo.toml index a814cb8244..2b628883e1 100644 --- a/crates/wasmparser/Cargo.toml +++ b/crates/wasmparser/Cargo.toml @@ -84,3 +84,5 @@ features = [] # WebAssembly. This is enabled by default but if your use case is only # interested in working with core modules then this feature can be disabled. 
component-model = [] + +simd = [] From baa1de5d51b8f3f3f2c945bba7f03c41aa0ecb5b Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 12 Nov 2024 12:45:43 +0100 Subject: [PATCH 08/83] add simd_visitor method to VisitOperator trait --- .../wasmparser/src/readers/core/operators.rs | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/crates/wasmparser/src/readers/core/operators.rs b/crates/wasmparser/src/readers/core/operators.rs index 7c1fa1683a..557bfb3a8a 100644 --- a/crates/wasmparser/src/readers/core/operators.rs +++ b/crates/wasmparser/src/readers/core/operators.rs @@ -445,15 +445,14 @@ pub trait VisitOperator<'a> { for_each_operator!(visit_operator) } + fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator> { None } + for_each_operator!(define_visit_operator); } /// Trait implemented by types that can visit all [`Operator`] variants. #[allow(missing_docs)] -pub trait VisitSimdOperator { - /// The result type of the visitor. - type Output; - +pub trait VisitSimdOperator<'a>: VisitOperator<'a> { /// Visits the SIMD [`Operator`] `op` using the given `offset`. 
/// /// # Note @@ -493,11 +492,13 @@ impl<'a, 'b, V: VisitOperator<'a> + ?Sized> VisitOperator<'a> for &'b mut V { fn visit_operator(&mut self, op: &Operator<'a>) -> Self::Output { V::visit_operator(*self, op) } + fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator> { + V::simd_visitor(*self) + } for_each_operator!(define_visit_operator_delegate); } -impl<'a, V: VisitSimdOperator + ?Sized> VisitSimdOperator for &'a mut V { - type Output = V::Output; +impl<'a, 'b, V: VisitSimdOperator<'a> + ?Sized> VisitSimdOperator<'a> for &'b mut V { fn visit_simd_operator(&mut self, op: &SimdOperator) -> Self::Output { V::visit_simd_operator(*self, op) } @@ -509,11 +510,13 @@ impl<'a, V: VisitOperator<'a> + ?Sized> VisitOperator<'a> for Box { fn visit_operator(&mut self, op: &Operator<'a>) -> Self::Output { V::visit_operator(&mut *self, op) } + fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator> { + V::simd_visitor(&mut *self) + } for_each_operator!(define_visit_operator_delegate); } -impl VisitSimdOperator for Box { - type Output = V::Output; +impl<'a, V: VisitSimdOperator<'a> + ?Sized> VisitSimdOperator<'a> for Box { fn visit_simd_operator(&mut self, op: &SimdOperator) -> Self::Output { V::visit_simd_operator(&mut *self, op) } From bb4a41538f45afb9949e8878a5452f6d0f86221c Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 12 Nov 2024 12:48:53 +0100 Subject: [PATCH 09/83] add VisitSimdOperator impl to OperatorFactory --- crates/wasmparser/src/binary_reader.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/crates/wasmparser/src/binary_reader.rs b/crates/wasmparser/src/binary_reader.rs index aab5fc33b2..c0b8a16c2b 100644 --- a/crates/wasmparser/src/binary_reader.rs +++ b/crates/wasmparser/src/binary_reader.rs @@ -2100,9 +2100,18 @@ macro_rules! 
define_visit_operator { impl<'a> VisitOperator<'a> for OperatorFactory<'a> { type Output = Operator<'a>; + #[cfg(feature = "simd")] + fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator> { + Some(self) + } + for_each_operator!(define_visit_operator); } +impl<'a> VisitSimdOperator<'a> for OperatorFactory<'a> { + for_each_simd_operator!(define_visit_operator); +} + /// Iterator returned from [`BinaryReader::read_iter`]. pub struct BinaryReaderIter<'a, 'me, T: FromReader<'a>> { remaining: usize, From df32e796b941054ea3983414492c604aafceef70 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 12 Nov 2024 12:53:42 +0100 Subject: [PATCH 10/83] use VisitOperator::simd_visitor in BinaryReader::visit_operator --- crates/wasmparser/src/binary_reader.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/crates/wasmparser/src/binary_reader.rs b/crates/wasmparser/src/binary_reader.rs index c0b8a16c2b..12587110db 100644 --- a/crates/wasmparser/src/binary_reader.rs +++ b/crates/wasmparser/src/binary_reader.rs @@ -1120,7 +1120,12 @@ impl<'a> BinaryReader<'a> { 0xfb => self.visit_0xfb_operator(pos, visitor)?, 0xfc => self.visit_0xfc_operator(pos, visitor)?, - 0xfd => self.visit_0xfd_operator(pos, visitor)?, + 0xfd => { + let Some(ref mut visitor) = visitor.simd_visitor() else { + bail!(pos, "unexpected SIMD opcode: 0x{code:x}") + }; + self.visit_0xfd_operator(pos, visitor)? 
+ }, 0xfe => self.visit_0xfe_operator(pos, visitor)?, _ => bail!(pos, "illegal opcode: 0x{code:x}"), @@ -1372,7 +1377,7 @@ impl<'a> BinaryReader<'a> { visitor: &mut T, ) -> Result<>::Output> where - T: VisitOperator<'a>, + T: VisitSimdOperator<'a>, { let code = self.read_var_u32()?; Ok(match code { From de0f049544ea0ba3554c57cae236f1f6f82862a6 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 12 Nov 2024 13:04:15 +0100 Subject: [PATCH 11/83] add lifetime to return value of simd_visitor method --- crates/wasmparser/src/binary_reader.rs | 4 ++-- crates/wasmparser/src/readers/core/operators.rs | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/crates/wasmparser/src/binary_reader.rs b/crates/wasmparser/src/binary_reader.rs index 12587110db..b7d8c0d9a5 100644 --- a/crates/wasmparser/src/binary_reader.rs +++ b/crates/wasmparser/src/binary_reader.rs @@ -1121,10 +1121,10 @@ impl<'a> BinaryReader<'a> { 0xfb => self.visit_0xfb_operator(pos, visitor)?, 0xfc => self.visit_0xfc_operator(pos, visitor)?, 0xfd => { - let Some(ref mut visitor) = visitor.simd_visitor() else { + let Some(mut visitor) = visitor.simd_visitor() else { bail!(pos, "unexpected SIMD opcode: 0x{code:x}") }; - self.visit_0xfd_operator(pos, visitor)? + self.visit_0xfd_operator(pos, &mut visitor)? 
}, 0xfe => self.visit_0xfe_operator(pos, visitor)?, diff --git a/crates/wasmparser/src/readers/core/operators.rs b/crates/wasmparser/src/readers/core/operators.rs index 557bfb3a8a..5c794d8482 100644 --- a/crates/wasmparser/src/readers/core/operators.rs +++ b/crates/wasmparser/src/readers/core/operators.rs @@ -445,7 +445,7 @@ pub trait VisitOperator<'a> { for_each_operator!(visit_operator) } - fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator> { None } + fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator<'a, Output = Self::Output>> { None } for_each_operator!(define_visit_operator); } @@ -492,7 +492,7 @@ impl<'a, 'b, V: VisitOperator<'a> + ?Sized> VisitOperator<'a> for &'b mut V { fn visit_operator(&mut self, op: &Operator<'a>) -> Self::Output { V::visit_operator(*self, op) } - fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator> { + fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator<'a, Output = V::Output>> { V::simd_visitor(*self) } for_each_operator!(define_visit_operator_delegate); @@ -510,7 +510,7 @@ impl<'a, V: VisitOperator<'a> + ?Sized> VisitOperator<'a> for Box { fn visit_operator(&mut self, op: &Operator<'a>) -> Self::Output { V::visit_operator(&mut *self, op) } - fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator> { + fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator<'a, Output = V::Output>> { V::simd_visitor(&mut *self) } for_each_operator!(define_visit_operator_delegate); From efd1c0c752df40f24e70b9a79d4f795c77c23a08 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 12 Nov 2024 13:04:36 +0100 Subject: [PATCH 12/83] add VisitSimdOperator impl for OperatorValidator --- crates/wasmparser/src/validator/operators.rs | 3341 +++++++++--------- 1 file changed, 1676 insertions(+), 1665 deletions(-) diff --git a/crates/wasmparser/src/validator/operators.rs b/crates/wasmparser/src/validator/operators.rs index 758447f3de..434377d156 100644 --- 
a/crates/wasmparser/src/validator/operators.rs +++ b/crates/wasmparser/src/validator/operators.rs @@ -28,7 +28,7 @@ use crate::{ MemArg, ModuleArity, RefType, Result, ResumeTable, StorageType, StructType, SubType, TableType, TryTable, UnpackedIndex, ValType, VisitOperator, WasmFeatures, WasmModuleResources, V128, }; -use crate::{prelude::*, CompositeInnerType, Ordering}; +use crate::{prelude::*, CompositeInnerType, Ordering, VisitSimdOperator}; use core::ops::{Deref, DerefMut}; pub(crate) struct OperatorValidator { @@ -1847,6 +1847,11 @@ where { type Output = Result<()>; + #[cfg(feature = "simd")] + fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator<'a, Output = Self::Output>> { + Some(self) + } + fn visit_nop(&mut self) -> Self::Output { Ok(()) } @@ -3129,2027 +3134,2033 @@ where } self.push_operand(ValType::I32) } - fn visit_v128_load(&mut self, memarg: MemArg) -> Self::Output { - let ty = self.check_memarg(memarg)?; + fn visit_memory_init(&mut self, segment: u32, mem: u32) -> Self::Output { + let ty = self.check_memory_index(mem)?; + self.check_data_segment(segment)?; + self.pop_operand(Some(ValType::I32))?; + self.pop_operand(Some(ValType::I32))?; self.pop_operand(Some(ty))?; - self.push_operand(ValType::V128)?; Ok(()) } - fn visit_v128_store(&mut self, memarg: MemArg) -> Self::Output { - let ty = self.check_memarg(memarg)?; - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(ty))?; + fn visit_data_drop(&mut self, segment: u32) -> Self::Output { + self.check_data_segment(segment)?; Ok(()) } - fn visit_v128_const(&mut self, _value: V128) -> Self::Output { - self.push_operand(ValType::V128)?; + fn visit_memory_copy(&mut self, dst: u32, src: u32) -> Self::Output { + let dst_ty = self.check_memory_index(dst)?; + let src_ty = self.check_memory_index(src)?; + + // The length operand here is the smaller of src/dst, which is + // i32 if one is i32 + self.pop_operand(Some(match src_ty { + ValType::I32 => ValType::I32, + _ => dst_ty, + }))?; + 
+ // ... and the offset into each memory is required to be + // whatever the indexing type is for that memory + self.pop_operand(Some(src_ty))?; + self.pop_operand(Some(dst_ty))?; Ok(()) } - fn visit_i8x16_splat(&mut self) -> Self::Output { - self.check_v128_splat(ValType::I32) - } - fn visit_i16x8_splat(&mut self) -> Self::Output { - self.check_v128_splat(ValType::I32) - } - fn visit_i32x4_splat(&mut self) -> Self::Output { - self.check_v128_splat(ValType::I32) - } - fn visit_i64x2_splat(&mut self) -> Self::Output { - self.check_v128_splat(ValType::I64) - } - fn visit_f32x4_splat(&mut self) -> Self::Output { - self.check_floats_enabled()?; - self.check_v128_splat(ValType::F32) - } - fn visit_f64x2_splat(&mut self) -> Self::Output { - self.check_floats_enabled()?; - self.check_v128_splat(ValType::F64) - } - fn visit_i8x16_extract_lane_s(&mut self, lane: u8) -> Self::Output { - self.check_simd_lane_index(lane, 16)?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::I32)?; + fn visit_memory_fill(&mut self, mem: u32) -> Self::Output { + let ty = self.check_memory_index(mem)?; + self.pop_operand(Some(ty))?; + self.pop_operand(Some(ValType::I32))?; + self.pop_operand(Some(ty))?; Ok(()) } - fn visit_i8x16_extract_lane_u(&mut self, lane: u8) -> Self::Output { - self.visit_i8x16_extract_lane_s(lane) - } - fn visit_i16x8_extract_lane_s(&mut self, lane: u8) -> Self::Output { - self.check_simd_lane_index(lane, 8)?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::I32)?; + fn visit_memory_discard(&mut self, mem: u32) -> Self::Output { + let ty = self.check_memory_index(mem)?; + self.pop_operand(Some(ty))?; + self.pop_operand(Some(ty))?; Ok(()) } - fn visit_i16x8_extract_lane_u(&mut self, lane: u8) -> Self::Output { - self.visit_i16x8_extract_lane_s(lane) - } - fn visit_i32x4_extract_lane(&mut self, lane: u8) -> Self::Output { - self.check_simd_lane_index(lane, 4)?; - self.pop_operand(Some(ValType::V128))?; - 
self.push_operand(ValType::I32)?; + fn visit_table_init(&mut self, segment: u32, table: u32) -> Self::Output { + let table = self.table_type_at(table)?; + let segment_ty = self.element_type_at(segment)?; + if !self + .resources + .is_subtype(ValType::Ref(segment_ty), ValType::Ref(table.element_type)) + { + bail!(self.offset, "type mismatch"); + } + self.pop_operand(Some(ValType::I32))?; + self.pop_operand(Some(ValType::I32))?; + self.pop_operand(Some(table.index_type()))?; Ok(()) } - fn visit_i8x16_replace_lane(&mut self, lane: u8) -> Self::Output { - self.check_simd_lane_index(lane, 16)?; - self.pop_operand(Some(ValType::I32))?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::V128)?; + fn visit_elem_drop(&mut self, segment: u32) -> Self::Output { + self.element_type_at(segment)?; Ok(()) } - fn visit_i16x8_replace_lane(&mut self, lane: u8) -> Self::Output { - self.check_simd_lane_index(lane, 8)?; - self.pop_operand(Some(ValType::I32))?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::V128)?; + fn visit_table_copy(&mut self, dst_table: u32, src_table: u32) -> Self::Output { + let src = self.table_type_at(src_table)?; + let dst = self.table_type_at(dst_table)?; + if !self.resources.is_subtype( + ValType::Ref(src.element_type), + ValType::Ref(dst.element_type), + ) { + bail!(self.offset, "type mismatch"); + } + + // The length operand here is the smaller of src/dst, which is + // i32 if one is i32 + self.pop_operand(Some(match src.index_type() { + ValType::I32 => ValType::I32, + _ => dst.index_type(), + }))?; + + // ... 
and the offset into each table is required to be + // whatever the indexing type is for that table + self.pop_operand(Some(src.index_type()))?; + self.pop_operand(Some(dst.index_type()))?; Ok(()) } - fn visit_i32x4_replace_lane(&mut self, lane: u8) -> Self::Output { - self.check_simd_lane_index(lane, 4)?; - self.pop_operand(Some(ValType::I32))?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::V128)?; + fn visit_table_get(&mut self, table: u32) -> Self::Output { + let table = self.table_type_at(table)?; + debug_assert_type_indices_are_ids(table.element_type.into()); + self.pop_operand(Some(table.index_type()))?; + self.push_operand(table.element_type)?; Ok(()) } - fn visit_i64x2_extract_lane(&mut self, lane: u8) -> Self::Output { - self.check_simd_lane_index(lane, 2)?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::I64)?; + fn visit_table_atomic_get(&mut self, _ordering: Ordering, table: u32) -> Self::Output { + self.visit_table_get(table)?; + // No validation of `ordering` is needed because `table.atomic.get` can + // be used on both shared and unshared tables. But we do need to limit + // which types can be used with this instruction. 
+ let ty = self.table_type_at(table)?.element_type; + let supertype = RefType::ANYREF.shared().unwrap(); + if !self.resources.is_subtype(ty.into(), supertype.into()) { + bail!( + self.offset, + "invalid type: `table.atomic.get` only allows subtypes of `anyref`" + ); + } Ok(()) } - fn visit_i64x2_replace_lane(&mut self, lane: u8) -> Self::Output { - self.check_simd_lane_index(lane, 2)?; - self.pop_operand(Some(ValType::I64))?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::V128)?; + fn visit_table_set(&mut self, table: u32) -> Self::Output { + let table = self.table_type_at(table)?; + debug_assert_type_indices_are_ids(table.element_type.into()); + self.pop_operand(Some(table.element_type.into()))?; + self.pop_operand(Some(table.index_type()))?; Ok(()) } - fn visit_f32x4_extract_lane(&mut self, lane: u8) -> Self::Output { - self.check_floats_enabled()?; - self.check_simd_lane_index(lane, 4)?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::F32)?; + fn visit_table_atomic_set(&mut self, _ordering: Ordering, table: u32) -> Self::Output { + self.visit_table_set(table)?; + // No validation of `ordering` is needed because `table.atomic.set` can + // be used on both shared and unshared tables. But we do need to limit + // which types can be used with this instruction. 
+ let ty = self.table_type_at(table)?.element_type; + let supertype = RefType::ANYREF.shared().unwrap(); + if !self.resources.is_subtype(ty.into(), supertype.into()) { + bail!( + self.offset, + "invalid type: `table.atomic.set` only allows subtypes of `anyref`" + ); + } Ok(()) } - fn visit_f32x4_replace_lane(&mut self, lane: u8) -> Self::Output { - self.check_floats_enabled()?; - self.check_simd_lane_index(lane, 4)?; - self.pop_operand(Some(ValType::F32))?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::V128)?; + fn visit_table_grow(&mut self, table: u32) -> Self::Output { + let table = self.table_type_at(table)?; + debug_assert_type_indices_are_ids(table.element_type.into()); + self.pop_operand(Some(table.index_type()))?; + self.pop_operand(Some(table.element_type.into()))?; + self.push_operand(table.index_type())?; Ok(()) } - fn visit_f64x2_extract_lane(&mut self, lane: u8) -> Self::Output { - self.check_floats_enabled()?; - self.check_simd_lane_index(lane, 2)?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::F64)?; + fn visit_table_size(&mut self, table: u32) -> Self::Output { + let table = self.table_type_at(table)?; + self.push_operand(table.index_type())?; Ok(()) } - fn visit_f64x2_replace_lane(&mut self, lane: u8) -> Self::Output { - self.check_floats_enabled()?; - self.check_simd_lane_index(lane, 2)?; - self.pop_operand(Some(ValType::F64))?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::V128)?; + fn visit_table_fill(&mut self, table: u32) -> Self::Output { + let table = self.table_type_at(table)?; + debug_assert_type_indices_are_ids(table.element_type.into()); + self.pop_operand(Some(table.index_type()))?; + self.pop_operand(Some(table.element_type.into()))?; + self.pop_operand(Some(table.index_type()))?; Ok(()) } - fn visit_f32x4_eq(&mut self) -> Self::Output { - self.check_v128_fbinary_op() + fn visit_table_atomic_rmw_xchg(&mut self, _ordering: Ordering, table: u32) -> Self::Output { 
+ let table = self.table_type_at(table)?; + let elem_ty = table.element_type.into(); + debug_assert_type_indices_are_ids(elem_ty); + let supertype = RefType::ANYREF.shared().unwrap(); + if !self.resources.is_subtype(elem_ty, supertype.into()) { + bail!( + self.offset, + "invalid type: `table.atomic.rmw.xchg` only allows subtypes of `anyref`" + ); + } + self.pop_operand(Some(elem_ty))?; + self.pop_operand(Some(table.index_type()))?; + self.push_operand(elem_ty)?; + Ok(()) } - fn visit_f32x4_ne(&mut self) -> Self::Output { - self.check_v128_fbinary_op() + fn visit_table_atomic_rmw_cmpxchg(&mut self, _ordering: Ordering, table: u32) -> Self::Output { + let table = self.table_type_at(table)?; + let elem_ty = table.element_type.into(); + debug_assert_type_indices_are_ids(elem_ty); + let supertype = RefType::EQREF.shared().unwrap(); + if !self.resources.is_subtype(elem_ty, supertype.into()) { + bail!( + self.offset, + "invalid type: `table.atomic.rmw.cmpxchg` only allows subtypes of `eqref`" + ); + } + self.pop_operand(Some(elem_ty))?; + self.pop_operand(Some(elem_ty))?; + self.pop_operand(Some(table.index_type()))?; + self.push_operand(elem_ty)?; + Ok(()) } - fn visit_f32x4_lt(&mut self) -> Self::Output { - self.check_v128_fbinary_op() + fn visit_struct_new(&mut self, struct_type_index: u32) -> Self::Output { + let struct_ty = self.struct_type_at(struct_type_index)?; + for ty in struct_ty.fields.iter().rev() { + self.pop_operand(Some(ty.element_type.unpack()))?; + } + self.push_concrete_ref(false, struct_type_index)?; + Ok(()) } - fn visit_f32x4_gt(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f32x4_le(&mut self) -> Self::Output { - self.check_v128_fbinary_op() + fn visit_struct_new_default(&mut self, type_index: u32) -> Self::Output { + let ty = self.struct_type_at(type_index)?; + for field in ty.fields.iter() { + let val_ty = field.element_type.unpack(); + if !val_ty.is_defaultable() { + bail!( + self.offset, + "invalid 
`struct.new_default`: {val_ty} field is not defaultable" + ); + } + } + self.push_concrete_ref(false, type_index)?; + Ok(()) } - fn visit_f32x4_ge(&mut self) -> Self::Output { - self.check_v128_fbinary_op() + fn visit_struct_get(&mut self, struct_type_index: u32, field_index: u32) -> Self::Output { + let field_ty = self.struct_field_at(struct_type_index, field_index)?; + if field_ty.element_type.is_packed() { + bail!( + self.offset, + "can only use struct `get` with non-packed storage types" + ) + } + self.pop_concrete_ref(true, struct_type_index)?; + self.push_operand(field_ty.element_type.unpack()) } - fn visit_f64x2_eq(&mut self) -> Self::Output { - self.check_v128_fbinary_op() + fn visit_struct_atomic_get( + &mut self, + _ordering: Ordering, + struct_type_index: u32, + field_index: u32, + ) -> Self::Output { + self.visit_struct_get(struct_type_index, field_index)?; + // The `atomic` version has some additional type restrictions. + let ty = self + .struct_field_at(struct_type_index, field_index)? 
+ .element_type; + let is_valid_type = match ty { + StorageType::Val(ValType::I32) | StorageType::Val(ValType::I64) => true, + StorageType::Val(v) => self + .resources + .is_subtype(v, RefType::ANYREF.shared().unwrap().into()), + _ => false, + }; + if !is_valid_type { + bail!( + self.offset, + "invalid type: `struct.atomic.get` only allows `i32`, `i64` and subtypes of `anyref`" + ); + } + Ok(()) } - fn visit_f64x2_ne(&mut self) -> Self::Output { - self.check_v128_fbinary_op() + fn visit_struct_get_s(&mut self, struct_type_index: u32, field_index: u32) -> Self::Output { + let field_ty = self.struct_field_at(struct_type_index, field_index)?; + if !field_ty.element_type.is_packed() { + bail!( + self.offset, + "cannot use struct.get_s with non-packed storage types" + ) + } + self.pop_concrete_ref(true, struct_type_index)?; + self.push_operand(field_ty.element_type.unpack()) } - fn visit_f64x2_lt(&mut self) -> Self::Output { - self.check_v128_fbinary_op() + fn visit_struct_atomic_get_s( + &mut self, + _ordering: Ordering, + struct_type_index: u32, + field_index: u32, + ) -> Self::Output { + self.visit_struct_get_s(struct_type_index, field_index)?; + // This instruction has the same type restrictions as the non-`atomic` version. + debug_assert!(matches!( + self.struct_field_at(struct_type_index, field_index)? 
+ .element_type, + StorageType::I8 | StorageType::I16 + )); + Ok(()) } - fn visit_f64x2_gt(&mut self) -> Self::Output { - self.check_v128_fbinary_op() + fn visit_struct_get_u(&mut self, struct_type_index: u32, field_index: u32) -> Self::Output { + let field_ty = self.struct_field_at(struct_type_index, field_index)?; + if !field_ty.element_type.is_packed() { + bail!( + self.offset, + "cannot use struct.get_u with non-packed storage types" + ) + } + self.pop_concrete_ref(true, struct_type_index)?; + self.push_operand(field_ty.element_type.unpack()) } - fn visit_f64x2_le(&mut self) -> Self::Output { - self.check_v128_fbinary_op() + fn visit_struct_atomic_get_u( + &mut self, + _ordering: Ordering, + struct_type_index: u32, + field_index: u32, + ) -> Self::Output { + self.visit_struct_get_s(struct_type_index, field_index)?; + // This instruction has the same type restrictions as the non-`atomic` version. + debug_assert!(matches!( + self.struct_field_at(struct_type_index, field_index)? + .element_type, + StorageType::I8 | StorageType::I16 + )); + Ok(()) } - fn visit_f64x2_ge(&mut self) -> Self::Output { - self.check_v128_fbinary_op() + fn visit_struct_set(&mut self, struct_type_index: u32, field_index: u32) -> Self::Output { + let field_ty = self.mutable_struct_field_at(struct_type_index, field_index)?; + self.pop_operand(Some(field_ty.element_type.unpack()))?; + self.pop_concrete_ref(true, struct_type_index)?; + Ok(()) } - fn visit_f32x4_add(&mut self) -> Self::Output { - self.check_v128_fbinary_op() + fn visit_struct_atomic_set( + &mut self, + _ordering: Ordering, + struct_type_index: u32, + field_index: u32, + ) -> Self::Output { + self.visit_struct_set(struct_type_index, field_index)?; + // The `atomic` version has some additional type restrictions. + let ty = self + .struct_field_at(struct_type_index, field_index)? 
+ .element_type; + let is_valid_type = match ty { + StorageType::I8 | StorageType::I16 => true, + StorageType::Val(ValType::I32) | StorageType::Val(ValType::I64) => true, + StorageType::Val(v) => self + .resources + .is_subtype(v, RefType::ANYREF.shared().unwrap().into()), + }; + if !is_valid_type { + bail!( + self.offset, + "invalid type: `struct.atomic.set` only allows `i8`, `i16`, `i32`, `i64` and subtypes of `anyref`" + ); + } + Ok(()) } - fn visit_f32x4_sub(&mut self) -> Self::Output { - self.check_v128_fbinary_op() + fn visit_struct_atomic_rmw_add( + &mut self, + _ordering: Ordering, + struct_type_index: u32, + field_index: u32, + ) -> Self::Output { + self.check_struct_atomic_rmw("add", struct_type_index, field_index) } - fn visit_f32x4_mul(&mut self) -> Self::Output { - self.check_v128_fbinary_op() + fn visit_struct_atomic_rmw_sub( + &mut self, + _ordering: Ordering, + struct_type_index: u32, + field_index: u32, + ) -> Self::Output { + self.check_struct_atomic_rmw("sub", struct_type_index, field_index) } - fn visit_f32x4_div(&mut self) -> Self::Output { - self.check_v128_fbinary_op() + fn visit_struct_atomic_rmw_and( + &mut self, + _ordering: Ordering, + struct_type_index: u32, + field_index: u32, + ) -> Self::Output { + self.check_struct_atomic_rmw("and", struct_type_index, field_index) } - fn visit_f32x4_min(&mut self) -> Self::Output { - self.check_v128_fbinary_op() + fn visit_struct_atomic_rmw_or( + &mut self, + _ordering: Ordering, + struct_type_index: u32, + field_index: u32, + ) -> Self::Output { + self.check_struct_atomic_rmw("or", struct_type_index, field_index) } - fn visit_f32x4_max(&mut self) -> Self::Output { - self.check_v128_fbinary_op() + fn visit_struct_atomic_rmw_xor( + &mut self, + _ordering: Ordering, + struct_type_index: u32, + field_index: u32, + ) -> Self::Output { + self.check_struct_atomic_rmw("xor", struct_type_index, field_index) } - fn visit_f32x4_pmin(&mut self) -> Self::Output { - self.check_v128_fbinary_op() + fn 
visit_struct_atomic_rmw_xchg( + &mut self, + _ordering: Ordering, + struct_type_index: u32, + field_index: u32, + ) -> Self::Output { + let field = self.mutable_struct_field_at(struct_type_index, field_index)?; + let is_valid_type = match field.element_type { + StorageType::Val(ValType::I32) | StorageType::Val(ValType::I64) => true, + StorageType::Val(v) => self + .resources + .is_subtype(v, RefType::ANYREF.shared().unwrap().into()), + _ => false, + }; + if !is_valid_type { + bail!( + self.offset, + "invalid type: `struct.atomic.rmw.xchg` only allows `i32`, `i64` and subtypes of `anyref`" + ); + } + let field_ty = field.element_type.unpack(); + self.pop_operand(Some(field_ty))?; + self.pop_concrete_ref(true, struct_type_index)?; + self.push_operand(field_ty)?; + Ok(()) } - fn visit_f32x4_pmax(&mut self) -> Self::Output { - self.check_v128_fbinary_op() + fn visit_struct_atomic_rmw_cmpxchg( + &mut self, + _ordering: Ordering, + struct_type_index: u32, + field_index: u32, + ) -> Self::Output { + let field = self.mutable_struct_field_at(struct_type_index, field_index)?; + let is_valid_type = match field.element_type { + StorageType::Val(ValType::I32) | StorageType::Val(ValType::I64) => true, + StorageType::Val(v) => self + .resources + .is_subtype(v, RefType::EQREF.shared().unwrap().into()), + _ => false, + }; + if !is_valid_type { + bail!( + self.offset, + "invalid type: `struct.atomic.rmw.cmpxchg` only allows `i32`, `i64` and subtypes of `eqref`" + ); + } + let field_ty = field.element_type.unpack(); + self.pop_operand(Some(field_ty))?; + self.pop_operand(Some(field_ty))?; + self.pop_concrete_ref(true, struct_type_index)?; + self.push_operand(field_ty)?; + Ok(()) } - fn visit_f64x2_add(&mut self) -> Self::Output { - self.check_v128_fbinary_op() + fn visit_array_new(&mut self, type_index: u32) -> Self::Output { + let array_ty = self.array_type_at(type_index)?; + self.pop_operand(Some(ValType::I32))?; + self.pop_operand(Some(array_ty.element_type.unpack()))?; + 
self.push_concrete_ref(false, type_index) } - fn visit_f64x2_sub(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f64x2_mul(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f64x2_div(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f64x2_min(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f64x2_max(&mut self) -> Self::Output { - self.check_v128_fbinary_op() + fn visit_array_new_default(&mut self, type_index: u32) -> Self::Output { + let ty = self.array_type_at(type_index)?; + let val_ty = ty.element_type.unpack(); + if !val_ty.is_defaultable() { + bail!( + self.offset, + "invalid `array.new_default`: {val_ty} field is not defaultable" + ); + } + self.pop_operand(Some(ValType::I32))?; + self.push_concrete_ref(false, type_index) } - fn visit_f64x2_pmin(&mut self) -> Self::Output { - self.check_v128_fbinary_op() + fn visit_array_new_fixed(&mut self, type_index: u32, n: u32) -> Self::Output { + let array_ty = self.array_type_at(type_index)?; + let elem_ty = array_ty.element_type.unpack(); + for _ in 0..n { + self.pop_operand(Some(elem_ty))?; + } + self.push_concrete_ref(false, type_index) } - fn visit_f64x2_pmax(&mut self) -> Self::Output { - self.check_v128_fbinary_op() + fn visit_array_new_data(&mut self, type_index: u32, data_index: u32) -> Self::Output { + let array_ty = self.array_type_at(type_index)?; + let elem_ty = array_ty.element_type.unpack(); + match elem_ty { + ValType::I32 | ValType::I64 | ValType::F32 | ValType::F64 | ValType::V128 => {} + ValType::Ref(_) => bail!( + self.offset, + "type mismatch: array.new_data can only create arrays with numeric and vector elements" + ), + } + self.check_data_segment(data_index)?; + self.pop_operand(Some(ValType::I32))?; + self.pop_operand(Some(ValType::I32))?; + self.push_concrete_ref(false, type_index) } - fn visit_i8x16_eq(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_array_new_elem(&mut 
self, type_index: u32, elem_index: u32) -> Self::Output { + let array_ty = self.array_type_at(type_index)?; + let array_ref_ty = match array_ty.element_type.unpack() { + ValType::Ref(rt) => rt, + ValType::I32 | ValType::I64 | ValType::F32 | ValType::F64 | ValType::V128 => bail!( + self.offset, + "type mismatch: array.new_elem can only create arrays with reference elements" + ), + }; + let elem_ref_ty = self.element_type_at(elem_index)?; + if !self + .resources + .is_subtype(elem_ref_ty.into(), array_ref_ty.into()) + { + bail!( + self.offset, + "invalid array.new_elem instruction: element segment {elem_index} type mismatch: \ + expected {array_ref_ty}, found {elem_ref_ty}" + ) + } + self.pop_operand(Some(ValType::I32))?; + self.pop_operand(Some(ValType::I32))?; + self.push_concrete_ref(false, type_index) } - fn visit_i8x16_ne(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_array_get(&mut self, type_index: u32) -> Self::Output { + let array_ty = self.array_type_at(type_index)?; + let elem_ty = array_ty.element_type; + if elem_ty.is_packed() { + bail!( + self.offset, + "cannot use array.get with packed storage types" + ) + } + self.pop_operand(Some(ValType::I32))?; + self.pop_concrete_ref(true, type_index)?; + self.push_operand(elem_ty.unpack()) } - fn visit_i8x16_lt_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_array_atomic_get(&mut self, _ordering: Ordering, type_index: u32) -> Self::Output { + self.visit_array_get(type_index)?; + // The `atomic` version has some additional type restrictions. 
+ let elem_ty = self.array_type_at(type_index)?.element_type; + let is_valid_type = match elem_ty { + StorageType::Val(ValType::I32) | StorageType::Val(ValType::I64) => true, + StorageType::Val(v) => self + .resources + .is_subtype(v, RefType::ANYREF.shared().unwrap().into()), + _ => false, + }; + if !is_valid_type { + bail!( + self.offset, + "invalid type: `array.atomic.get` only allows `i32`, `i64` and subtypes of `anyref`" + ); + } + Ok(()) } - fn visit_i8x16_lt_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_array_get_s(&mut self, type_index: u32) -> Self::Output { + let array_ty = self.array_type_at(type_index)?; + let elem_ty = array_ty.element_type; + if !elem_ty.is_packed() { + bail!( + self.offset, + "cannot use array.get_s with non-packed storage types" + ) + } + self.pop_operand(Some(ValType::I32))?; + self.pop_concrete_ref(true, type_index)?; + self.push_operand(elem_ty.unpack()) } - fn visit_i8x16_gt_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_array_atomic_get_s(&mut self, _ordering: Ordering, type_index: u32) -> Self::Output { + self.visit_array_get_s(type_index)?; + // This instruction has the same type restrictions as the non-`atomic` version. 
+ debug_assert!(matches!( + self.array_type_at(type_index)?.element_type, + StorageType::I8 | StorageType::I16 + )); + Ok(()) } - fn visit_i8x16_gt_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_array_get_u(&mut self, type_index: u32) -> Self::Output { + let array_ty = self.array_type_at(type_index)?; + let elem_ty = array_ty.element_type; + if !elem_ty.is_packed() { + bail!( + self.offset, + "cannot use array.get_u with non-packed storage types" + ) + } + self.pop_operand(Some(ValType::I32))?; + self.pop_concrete_ref(true, type_index)?; + self.push_operand(elem_ty.unpack()) } - fn visit_i8x16_le_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_array_atomic_get_u(&mut self, _ordering: Ordering, type_index: u32) -> Self::Output { + self.visit_array_get_u(type_index)?; + // This instruction has the same type restrictions as the non-`atomic` version. + debug_assert!(matches!( + self.array_type_at(type_index)?.element_type, + StorageType::I8 | StorageType::I16 + )); + Ok(()) } - fn visit_i8x16_le_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_array_set(&mut self, type_index: u32) -> Self::Output { + let array_ty = self.mutable_array_type_at(type_index)?; + self.pop_operand(Some(array_ty.element_type.unpack()))?; + self.pop_operand(Some(ValType::I32))?; + self.pop_concrete_ref(true, type_index)?; + Ok(()) } - fn visit_i8x16_ge_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_array_atomic_set(&mut self, _ordering: Ordering, type_index: u32) -> Self::Output { + self.visit_array_set(type_index)?; + // The `atomic` version has some additional type restrictions. 
+ let elem_ty = self.array_type_at(type_index)?.element_type; + let is_valid_type = match elem_ty { + StorageType::I8 | StorageType::I16 => true, + StorageType::Val(ValType::I32) | StorageType::Val(ValType::I64) => true, + StorageType::Val(v) => self + .resources + .is_subtype(v, RefType::ANYREF.shared().unwrap().into()), + }; + if !is_valid_type { + bail!( + self.offset, + "invalid type: `array.atomic.set` only allows `i8`, `i16`, `i32`, `i64` and subtypes of `anyref`" + ); + } + Ok(()) } - fn visit_i8x16_ge_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_array_len(&mut self) -> Self::Output { + self.pop_maybe_shared_ref(AbstractHeapType::Array)?; + self.push_operand(ValType::I32) } - fn visit_i16x8_eq(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_array_fill(&mut self, array_type_index: u32) -> Self::Output { + let array_ty = self.mutable_array_type_at(array_type_index)?; + self.pop_operand(Some(ValType::I32))?; + self.pop_operand(Some(array_ty.element_type.unpack()))?; + self.pop_operand(Some(ValType::I32))?; + self.pop_concrete_ref(true, array_type_index)?; + Ok(()) } - fn visit_i16x8_ne(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_array_copy(&mut self, type_index_dst: u32, type_index_src: u32) -> Self::Output { + let array_ty_dst = self.mutable_array_type_at(type_index_dst)?; + let array_ty_src = self.array_type_at(type_index_src)?; + match (array_ty_dst.element_type, array_ty_src.element_type) { + (StorageType::I8, StorageType::I8) => {} + (StorageType::I8, ty) => bail!( + self.offset, + "array types do not match: expected i8, found {ty}" + ), + (StorageType::I16, StorageType::I16) => {} + (StorageType::I16, ty) => bail!( + self.offset, + "array types do not match: expected i16, found {ty}" + ), + (StorageType::Val(dst), StorageType::Val(src)) => { + if !self.resources.is_subtype(src, dst) { + bail!( + self.offset, + "array types do not match: expected {dst}, found {src}" + ) + } + } + 
(StorageType::Val(dst), src) => { + bail!( + self.offset, + "array types do not match: expected {dst}, found {src}" + ) + } + } + self.pop_operand(Some(ValType::I32))?; + self.pop_operand(Some(ValType::I32))?; + self.pop_concrete_ref(true, type_index_src)?; + self.pop_operand(Some(ValType::I32))?; + self.pop_concrete_ref(true, type_index_dst)?; + Ok(()) } - fn visit_i16x8_lt_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_array_init_data( + &mut self, + array_type_index: u32, + array_data_index: u32, + ) -> Self::Output { + let array_ty = self.mutable_array_type_at(array_type_index)?; + let val_ty = array_ty.element_type.unpack(); + match val_ty { + ValType::I32 | ValType::I64 | ValType::F32 | ValType::F64 | ValType::V128 => {} + ValType::Ref(_) => bail!( + self.offset, + "invalid array.init_data: array type is not numeric or vector" + ), + } + self.check_data_segment(array_data_index)?; + self.pop_operand(Some(ValType::I32))?; + self.pop_operand(Some(ValType::I32))?; + self.pop_operand(Some(ValType::I32))?; + self.pop_concrete_ref(true, array_type_index)?; + Ok(()) } - fn visit_i16x8_lt_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_array_init_elem(&mut self, type_index: u32, elem_index: u32) -> Self::Output { + let array_ty = self.mutable_array_type_at(type_index)?; + let array_ref_ty = match array_ty.element_type.unpack() { + ValType::Ref(rt) => rt, + ValType::I32 | ValType::I64 | ValType::F32 | ValType::F64 | ValType::V128 => bail!( + self.offset, + "type mismatch: array.init_elem can only create arrays with reference elements" + ), + }; + let elem_ref_ty = self.element_type_at(elem_index)?; + if !self + .resources + .is_subtype(elem_ref_ty.into(), array_ref_ty.into()) + { + bail!( + self.offset, + "invalid array.init_elem instruction: element segment {elem_index} type mismatch: \ + expected {array_ref_ty}, found {elem_ref_ty}" + ) + } + self.pop_operand(Some(ValType::I32))?; + 
self.pop_operand(Some(ValType::I32))?; + self.pop_operand(Some(ValType::I32))?; + self.pop_concrete_ref(true, type_index)?; + Ok(()) } - fn visit_i16x8_gt_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_array_atomic_rmw_add(&mut self, _ordering: Ordering, type_index: u32) -> Self::Output { + self.check_array_atomic_rmw("add", type_index) } - fn visit_i16x8_gt_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_array_atomic_rmw_sub(&mut self, _ordering: Ordering, type_index: u32) -> Self::Output { + self.check_array_atomic_rmw("sub", type_index) } - fn visit_i16x8_le_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_array_atomic_rmw_and(&mut self, _ordering: Ordering, type_index: u32) -> Self::Output { + self.check_array_atomic_rmw("and", type_index) } - fn visit_i16x8_le_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_array_atomic_rmw_or(&mut self, _ordering: Ordering, type_index: u32) -> Self::Output { + self.check_array_atomic_rmw("or", type_index) } - fn visit_i16x8_ge_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_array_atomic_rmw_xor(&mut self, _ordering: Ordering, type_index: u32) -> Self::Output { + self.check_array_atomic_rmw("xor", type_index) } - fn visit_i16x8_ge_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_array_atomic_rmw_xchg( + &mut self, + _ordering: Ordering, + type_index: u32, + ) -> Self::Output { + let field = self.mutable_array_type_at(type_index)?; + let is_valid_type = match field.element_type { + StorageType::Val(ValType::I32) | StorageType::Val(ValType::I64) => true, + StorageType::Val(v) => self + .resources + .is_subtype(v, RefType::ANYREF.shared().unwrap().into()), + _ => false, + }; + if !is_valid_type { + bail!( + self.offset, + "invalid type: `array.atomic.rmw.xchg` only allows `i32`, `i64` and subtypes of `anyref`" + ); + } + let elem_ty = field.element_type.unpack(); + 
self.pop_operand(Some(elem_ty))?; + self.pop_operand(Some(ValType::I32))?; + self.pop_concrete_ref(true, type_index)?; + self.push_operand(elem_ty)?; + Ok(()) } - fn visit_i32x4_eq(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_array_atomic_rmw_cmpxchg( + &mut self, + _ordering: Ordering, + type_index: u32, + ) -> Self::Output { + let field = self.mutable_array_type_at(type_index)?; + let is_valid_type = match field.element_type { + StorageType::Val(ValType::I32) | StorageType::Val(ValType::I64) => true, + StorageType::Val(v) => self + .resources + .is_subtype(v, RefType::EQREF.shared().unwrap().into()), + _ => false, + }; + if !is_valid_type { + bail!( + self.offset, + "invalid type: `array.atomic.rmw.cmpxchg` only allows `i32`, `i64` and subtypes of `eqref`" + ); + } + let elem_ty = field.element_type.unpack(); + self.pop_operand(Some(elem_ty))?; + self.pop_operand(Some(elem_ty))?; + self.pop_operand(Some(ValType::I32))?; + self.pop_concrete_ref(true, type_index)?; + self.push_operand(elem_ty)?; + Ok(()) } - fn visit_i32x4_ne(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_any_convert_extern(&mut self) -> Self::Output { + let any_ref = match self.pop_maybe_shared_ref(AbstractHeapType::Extern)? { + MaybeType::Bottom | MaybeType::UnknownRef(_) => { + MaybeType::UnknownRef(Some(AbstractHeapType::Any)) + } + MaybeType::Known(ty) => { + let shared = self.resources.is_shared(ty); + let heap_type = HeapType::Abstract { + shared, + ty: AbstractHeapType::Any, + }; + let any_ref = RefType::new(ty.is_nullable(), heap_type).unwrap(); + MaybeType::Known(any_ref) + } + }; + self.push_operand(any_ref) } - fn visit_i32x4_lt_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_extern_convert_any(&mut self) -> Self::Output { + let extern_ref = match self.pop_maybe_shared_ref(AbstractHeapType::Any)? 
{ + MaybeType::Bottom | MaybeType::UnknownRef(_) => { + MaybeType::UnknownRef(Some(AbstractHeapType::Extern)) + } + MaybeType::Known(ty) => { + let shared = self.resources.is_shared(ty); + let heap_type = HeapType::Abstract { + shared, + ty: AbstractHeapType::Extern, + }; + let extern_ref = RefType::new(ty.is_nullable(), heap_type).unwrap(); + MaybeType::Known(extern_ref) + } + }; + self.push_operand(extern_ref) } - fn visit_i32x4_lt_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_ref_test_non_null(&mut self, heap_type: HeapType) -> Self::Output { + self.check_ref_test(false, heap_type) } - fn visit_i32x4_gt_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_ref_test_nullable(&mut self, heap_type: HeapType) -> Self::Output { + self.check_ref_test(true, heap_type) } - fn visit_i32x4_gt_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_ref_cast_non_null(&mut self, heap_type: HeapType) -> Self::Output { + self.check_ref_cast(false, heap_type) } - fn visit_i32x4_le_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_ref_cast_nullable(&mut self, heap_type: HeapType) -> Self::Output { + self.check_ref_cast(true, heap_type) } - fn visit_i32x4_le_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_br_on_cast( + &mut self, + relative_depth: u32, + mut from_ref_type: RefType, + mut to_ref_type: RefType, + ) -> Self::Output { + self.resources + .check_ref_type(&mut from_ref_type, self.offset)?; + self.resources + .check_ref_type(&mut to_ref_type, self.offset)?; + + if !self + .resources + .is_subtype(to_ref_type.into(), from_ref_type.into()) + { + bail!( + self.offset, + "type mismatch: expected {from_ref_type}, found {to_ref_type}" + ); + } + + let (block_ty, frame_kind) = self.jump(relative_depth)?; + let mut label_types = self.label_types(block_ty, frame_kind)?; + + match label_types.next_back() { + Some(label_ty) if self.resources.is_subtype(to_ref_type.into(), 
label_ty) => { + self.pop_operand(Some(from_ref_type.into()))?; + } + Some(label_ty) => bail!( + self.offset, + "type mismatch: casting to type {to_ref_type}, but it does not match \ + label result type {label_ty}" + ), + None => bail!( + self.offset, + "type mismatch: br_on_cast to label with empty types, must have a reference type" + ), + }; + + self.pop_push_label_types(label_types)?; + let diff_ty = RefType::difference(from_ref_type, to_ref_type); + self.push_operand(diff_ty)?; + Ok(()) } - fn visit_i32x4_ge_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_br_on_cast_fail( + &mut self, + relative_depth: u32, + mut from_ref_type: RefType, + mut to_ref_type: RefType, + ) -> Self::Output { + self.resources + .check_ref_type(&mut from_ref_type, self.offset)?; + self.resources + .check_ref_type(&mut to_ref_type, self.offset)?; + + if !self + .resources + .is_subtype(to_ref_type.into(), from_ref_type.into()) + { + bail!( + self.offset, + "type mismatch: expected {from_ref_type}, found {to_ref_type}" + ); + } + + let (block_ty, frame_kind) = self.jump(relative_depth)?; + let mut label_tys = self.label_types(block_ty, frame_kind)?; + + let diff_ty = RefType::difference(from_ref_type, to_ref_type); + match label_tys.next_back() { + Some(label_ty) if self.resources.is_subtype(diff_ty.into(), label_ty) => { + self.pop_operand(Some(from_ref_type.into()))?; + } + Some(label_ty) => bail!( + self.offset, + "type mismatch: expected label result type {label_ty}, found {diff_ty}" + ), + None => bail!( + self.offset, + "type mismatch: expected a reference type, found nothing" + ), + } + + self.pop_push_label_types(label_tys)?; + self.push_operand(to_ref_type)?; + Ok(()) } - fn visit_i32x4_ge_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_ref_i31(&mut self) -> Self::Output { + self.pop_operand(Some(ValType::I32))?; + self.push_operand(ValType::Ref(RefType::I31)) } - fn visit_i64x2_eq(&mut self) -> Self::Output { - 
self.check_v128_binary_op() + fn visit_ref_i31_shared(&mut self) -> Self::Output { + self.pop_operand(Some(ValType::I32))?; + self.push_operand(ValType::Ref( + RefType::I31.shared().expect("i31 is abstract"), + )) } - fn visit_i64x2_ne(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i64x2_lt_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i64x2_gt_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i64x2_le_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i64x2_ge_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_v128_and(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_i31_get_s(&mut self) -> Self::Output { + self.pop_maybe_shared_ref(AbstractHeapType::I31)?; + self.push_operand(ValType::I32) } - fn visit_v128_andnot(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_i31_get_u(&mut self) -> Self::Output { + self.pop_maybe_shared_ref(AbstractHeapType::I31)?; + self.push_operand(ValType::I32) } - fn visit_v128_or(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_try(&mut self, mut ty: BlockType) -> Self::Output { + self.check_block_type(&mut ty)?; + for ty in self.params(ty)?.rev() { + self.pop_operand(Some(ty))?; + } + self.push_ctrl(FrameKind::LegacyTry, ty)?; + Ok(()) } - fn visit_v128_xor(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_catch(&mut self, index: u32) -> Self::Output { + let frame = self.pop_ctrl()?; + if frame.kind != FrameKind::LegacyTry && frame.kind != FrameKind::LegacyCatch { + bail!(self.offset, "catch found outside of an `try` block"); + } + // Start a new frame and push `exnref` value. 
+ let height = self.operands.len(); + let init_height = self.local_inits.push_ctrl(); + self.control.push(Frame { + kind: FrameKind::LegacyCatch, + block_type: frame.block_type, + height, + unreachable: false, + init_height, + }); + // Push exception argument types. + let ty = self.exception_tag_at(index)?; + for ty in ty.params() { + self.push_operand(*ty)?; + } + Ok(()) } - fn visit_i8x16_add(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_rethrow(&mut self, relative_depth: u32) -> Self::Output { + // This is not a jump, but we need to check that the `rethrow` + // targets an actual `catch` to get the exception. + let (_, kind) = self.jump(relative_depth)?; + if kind != FrameKind::LegacyCatch && kind != FrameKind::LegacyCatchAll { + bail!( + self.offset, + "invalid rethrow label: target was not a `catch` block" + ); + } + self.unreachable()?; + Ok(()) } - fn visit_i8x16_add_sat_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_delegate(&mut self, relative_depth: u32) -> Self::Output { + let frame = self.pop_ctrl()?; + if frame.kind != FrameKind::LegacyTry { + bail!(self.offset, "delegate found outside of an `try` block"); + } + // This operation is not a jump, but we need to check the + // depth for validity + let _ = self.jump(relative_depth)?; + for ty in self.results(frame.block_type)? 
{ + self.push_operand(ty)?; + } + Ok(()) } - fn visit_i8x16_add_sat_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_catch_all(&mut self) -> Self::Output { + let frame = self.pop_ctrl()?; + if frame.kind == FrameKind::LegacyCatchAll { + bail!(self.offset, "only one catch_all allowed per `try` block"); + } else if frame.kind != FrameKind::LegacyTry && frame.kind != FrameKind::LegacyCatch { + bail!(self.offset, "catch_all found outside of a `try` block"); + } + let height = self.operands.len(); + let init_height = self.local_inits.push_ctrl(); + self.control.push(Frame { + kind: FrameKind::LegacyCatchAll, + block_type: frame.block_type, + height, + unreachable: false, + init_height, + }); + Ok(()) } - fn visit_i8x16_sub(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_cont_new(&mut self, type_index: u32) -> Self::Output { + let cont_ty = self.cont_type_at(type_index)?; + let rt = RefType::concrete(true, cont_ty.0); + self.pop_ref(Some(rt))?; + self.push_concrete_ref(false, type_index)?; + Ok(()) } - fn visit_i8x16_sub_sat_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_cont_bind(&mut self, argument_index: u32, result_index: u32) -> Self::Output { + // [ts1 ts1'] -> [ts2] + let arg_cont = self.cont_type_at(argument_index)?; + let arg_func = self.func_type_of_cont_type(arg_cont); + // [ts1''] -> [ts2'] + let res_cont = self.cont_type_at(result_index)?; + let res_func = self.func_type_of_cont_type(res_cont); + + // Verify that the argument's domain is at least as large as the + // result's domain. 
+ if arg_func.params().len() < res_func.params().len() { + bail!(self.offset, "type mismatch in continuation arguments"); + } + + let argcnt = arg_func.params().len() - res_func.params().len(); + + // Check that [ts1'] -> [ts2] <: [ts1''] -> [ts2'] + if !self.is_subtype_many(res_func.params(), &arg_func.params()[argcnt..]) + || arg_func.results().len() != res_func.results().len() + || !self.is_subtype_many(arg_func.results(), res_func.results()) + { + bail!(self.offset, "type mismatch in continuation types"); + } + + // Check that the continuation is available on the stack. + self.pop_concrete_ref(true, argument_index)?; + + // Check that the argument prefix is available on the stack. + for &ty in arg_func.params().iter().take(argcnt).rev() { + self.pop_operand(Some(ty))?; + } + + // Construct the result type. + self.push_concrete_ref(false, result_index)?; + + Ok(()) } - fn visit_i8x16_sub_sat_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_suspend(&mut self, tag_index: u32) -> Self::Output { + let ft = &self.tag_at(tag_index)?; + for &ty in ft.params().iter().rev() { + self.pop_operand(Some(ty))?; + } + for &ty in ft.results() { + self.push_operand(ty)?; + } + Ok(()) } - fn visit_i8x16_min_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_resume(&mut self, type_index: u32, table: ResumeTable) -> Self::Output { + // [ts1] -> [ts2] + let ft = self.check_resume_table(table, type_index)?; + self.pop_concrete_ref(true, type_index)?; + // Check that ts1 are available on the stack. + for &ty in ft.params().iter().rev() { + self.pop_operand(Some(ty))?; + } + + // Make ts2 available on the stack. 
+ for &ty in ft.results() { + self.push_operand(ty)?; + } + Ok(()) } - fn visit_i8x16_min_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_resume_throw( + &mut self, + type_index: u32, + tag_index: u32, + table: ResumeTable, + ) -> Self::Output { + // [ts1] -> [ts2] + let ft = self.check_resume_table(table, type_index)?; + // [ts1'] -> [] + let tag_ty = self.exception_tag_at(tag_index)?; + if tag_ty.results().len() != 0 { + bail!(self.offset, "type mismatch: non-empty tag result type") + } + self.pop_concrete_ref(true, type_index)?; + // Check that ts1' are available on the stack. + for &ty in tag_ty.params().iter().rev() { + self.pop_operand(Some(ty))?; + } + + // Make ts2 available on the stack. + for &ty in ft.results() { + self.push_operand(ty)?; + } + Ok(()) } - fn visit_i8x16_max_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_switch(&mut self, type_index: u32, tag_index: u32) -> Self::Output { + // [t1* (ref null $ct2)] -> [te1*] + let cont_ty = self.cont_type_at(type_index)?; + let func_ty = self.func_type_of_cont_type(cont_ty); + // [] -> [t*] + let tag_ty = self.tag_at(tag_index)?; + if tag_ty.params().len() != 0 { + bail!(self.offset, "type mismatch: non-empty tag parameter type") + } + // Extract the other continuation reference + match func_ty.params().last() { + Some(ValType::Ref(rt)) if rt.is_concrete_type_ref() => { + let other_cont_id = rt + .type_index() + .unwrap() + .unpack() + .as_core_type_id() + .expect("expected canonicalized index"); + let sub_ty = self.resources.sub_type_at_id(other_cont_id); + let other_cont_ty = + if let CompositeInnerType::Cont(cont) = &sub_ty.composite_type.inner { + cont + } else { + bail!(self.offset, "non-continuation type"); + }; + let other_func_ty = self.func_type_of_cont_type(&other_cont_ty); + if func_ty.results().len() != tag_ty.results().len() + || !self.is_subtype_many(func_ty.results(), tag_ty.results()) + || other_func_ty.results().len() != 
tag_ty.results().len() + || !self.is_subtype_many(tag_ty.results(), other_func_ty.results()) + { + bail!(self.offset, "type mismatch in continuation types") + } + + // Pop the continuation reference. + self.pop_concrete_ref(true, type_index)?; + + // Check that the arguments t1* are available on the + // stack. + for &ty in func_ty.params().iter().rev().skip(1) { + self.pop_operand(Some(ty))?; + } + + // Make the results t2* available on the stack. + for &ty in other_func_ty.params() { + self.push_operand(ty)?; + } + } + Some(ty) => bail!( + self.offset, + "type mismatch: expected a continuation reference, found {}", + ty_to_str(*ty) + ), + None => bail!( + self.offset, + "type mismatch: instruction requires a continuation reference" + ), + } + Ok(()) } - fn visit_i8x16_max_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_i64_add128(&mut self) -> Result<()> { + self.check_binop128() } - fn visit_i16x8_add(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_i64_sub128(&mut self) -> Result<()> { + self.check_binop128() } - fn visit_i16x8_add_sat_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_i64_mul_wide_s(&mut self) -> Result<()> { + self.check_i64_mul_wide() } - fn visit_i16x8_add_sat_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_i64_mul_wide_u(&mut self) -> Result<()> { + self.check_i64_mul_wide() } - fn visit_i16x8_sub(&mut self) -> Self::Output { - self.check_v128_binary_op() +} + +impl<'a, T> VisitSimdOperator<'a> for OperatorValidatorTemp<'_, '_, T> +where + T: WasmModuleResources, +{ + fn visit_v128_load(&mut self, memarg: MemArg) -> Self::Output { + let ty = self.check_memarg(memarg)?; + self.pop_operand(Some(ty))?; + self.push_operand(ValType::V128)?; + Ok(()) } - fn visit_i16x8_sub_sat_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_v128_store(&mut self, memarg: MemArg) -> Self::Output { + let ty = self.check_memarg(memarg)?; + 
self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(ty))?; + Ok(()) } - fn visit_i16x8_sub_sat_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_v128_const(&mut self, _value: V128) -> Self::Output { + self.push_operand(ValType::V128)?; + Ok(()) } - fn visit_i16x8_mul(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_i8x16_splat(&mut self) -> Self::Output { + self.check_v128_splat(ValType::I32) } - fn visit_i16x8_min_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_i16x8_splat(&mut self) -> Self::Output { + self.check_v128_splat(ValType::I32) } - fn visit_i16x8_min_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_i32x4_splat(&mut self) -> Self::Output { + self.check_v128_splat(ValType::I32) } - fn visit_i16x8_max_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_i64x2_splat(&mut self) -> Self::Output { + self.check_v128_splat(ValType::I64) } - fn visit_i16x8_max_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_f32x4_splat(&mut self) -> Self::Output { + self.check_floats_enabled()?; + self.check_v128_splat(ValType::F32) } - fn visit_i32x4_add(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_f64x2_splat(&mut self) -> Self::Output { + self.check_floats_enabled()?; + self.check_v128_splat(ValType::F64) } - fn visit_i32x4_sub(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_i8x16_extract_lane_s(&mut self, lane: u8) -> Self::Output { + self.check_simd_lane_index(lane, 16)?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::I32)?; + Ok(()) } - fn visit_i32x4_mul(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_i8x16_extract_lane_u(&mut self, lane: u8) -> Self::Output { + self.visit_i8x16_extract_lane_s(lane) } - fn visit_i32x4_min_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_i16x8_extract_lane_s(&mut self, lane: u8) -> 
Self::Output { + self.check_simd_lane_index(lane, 8)?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::I32)?; + Ok(()) } - fn visit_i32x4_min_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_i16x8_extract_lane_u(&mut self, lane: u8) -> Self::Output { + self.visit_i16x8_extract_lane_s(lane) } - fn visit_i32x4_max_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_i32x4_extract_lane(&mut self, lane: u8) -> Self::Output { + self.check_simd_lane_index(lane, 4)?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::I32)?; + Ok(()) } - fn visit_i32x4_max_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_i8x16_replace_lane(&mut self, lane: u8) -> Self::Output { + self.check_simd_lane_index(lane, 16)?; + self.pop_operand(Some(ValType::I32))?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::V128)?; + Ok(()) } - fn visit_i32x4_dot_i16x8_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_i16x8_replace_lane(&mut self, lane: u8) -> Self::Output { + self.check_simd_lane_index(lane, 8)?; + self.pop_operand(Some(ValType::I32))?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::V128)?; + Ok(()) } - fn visit_i64x2_add(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_i32x4_replace_lane(&mut self, lane: u8) -> Self::Output { + self.check_simd_lane_index(lane, 4)?; + self.pop_operand(Some(ValType::I32))?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::V128)?; + Ok(()) } - fn visit_i64x2_sub(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_i64x2_extract_lane(&mut self, lane: u8) -> Self::Output { + self.check_simd_lane_index(lane, 2)?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::I64)?; + Ok(()) } - fn visit_i64x2_mul(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_i64x2_replace_lane(&mut self, lane: u8) 
-> Self::Output { + self.check_simd_lane_index(lane, 2)?; + self.pop_operand(Some(ValType::I64))?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::V128)?; + Ok(()) } - fn visit_i8x16_avgr_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_f32x4_extract_lane(&mut self, lane: u8) -> Self::Output { + self.check_floats_enabled()?; + self.check_simd_lane_index(lane, 4)?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::F32)?; + Ok(()) } - fn visit_i16x8_avgr_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_f32x4_replace_lane(&mut self, lane: u8) -> Self::Output { + self.check_floats_enabled()?; + self.check_simd_lane_index(lane, 4)?; + self.pop_operand(Some(ValType::F32))?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::V128)?; + Ok(()) } - fn visit_i8x16_narrow_i16x8_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_f64x2_extract_lane(&mut self, lane: u8) -> Self::Output { + self.check_floats_enabled()?; + self.check_simd_lane_index(lane, 2)?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::F64)?; + Ok(()) } - fn visit_i8x16_narrow_i16x8_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_f64x2_replace_lane(&mut self, lane: u8) -> Self::Output { + self.check_floats_enabled()?; + self.check_simd_lane_index(lane, 2)?; + self.pop_operand(Some(ValType::F64))?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::V128)?; + Ok(()) } - fn visit_i16x8_narrow_i32x4_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_f32x4_eq(&mut self) -> Self::Output { + self.check_v128_fbinary_op() } - fn visit_i16x8_narrow_i32x4_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_f32x4_ne(&mut self) -> Self::Output { + self.check_v128_fbinary_op() } - fn visit_i16x8_extmul_low_i8x16_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn 
visit_f32x4_lt(&mut self) -> Self::Output { + self.check_v128_fbinary_op() } - fn visit_i16x8_extmul_high_i8x16_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_f32x4_gt(&mut self) -> Self::Output { + self.check_v128_fbinary_op() } - fn visit_i16x8_extmul_low_i8x16_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_f32x4_le(&mut self) -> Self::Output { + self.check_v128_fbinary_op() } - fn visit_i16x8_extmul_high_i8x16_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_f32x4_ge(&mut self) -> Self::Output { + self.check_v128_fbinary_op() } - fn visit_i32x4_extmul_low_i16x8_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_f64x2_eq(&mut self) -> Self::Output { + self.check_v128_fbinary_op() } - fn visit_i32x4_extmul_high_i16x8_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_f64x2_ne(&mut self) -> Self::Output { + self.check_v128_fbinary_op() } - fn visit_i32x4_extmul_low_i16x8_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_f64x2_lt(&mut self) -> Self::Output { + self.check_v128_fbinary_op() } - fn visit_i32x4_extmul_high_i16x8_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_f64x2_gt(&mut self) -> Self::Output { + self.check_v128_fbinary_op() } - fn visit_i64x2_extmul_low_i32x4_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_f64x2_le(&mut self) -> Self::Output { + self.check_v128_fbinary_op() } - fn visit_i64x2_extmul_high_i32x4_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_f64x2_ge(&mut self) -> Self::Output { + self.check_v128_fbinary_op() } - fn visit_i64x2_extmul_low_i32x4_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_f32x4_add(&mut self) -> Self::Output { + self.check_v128_fbinary_op() } - fn visit_i64x2_extmul_high_i32x4_u(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_f32x4_sub(&mut self) -> Self::Output { 
+ self.check_v128_fbinary_op() } - fn visit_i16x8_q15mulr_sat_s(&mut self) -> Self::Output { - self.check_v128_binary_op() + fn visit_f32x4_mul(&mut self) -> Self::Output { + self.check_v128_fbinary_op() } - fn visit_f32x4_ceil(&mut self) -> Self::Output { - self.check_v128_funary_op() + fn visit_f32x4_div(&mut self) -> Self::Output { + self.check_v128_fbinary_op() } - fn visit_f32x4_floor(&mut self) -> Self::Output { - self.check_v128_funary_op() + fn visit_f32x4_min(&mut self) -> Self::Output { + self.check_v128_fbinary_op() } - fn visit_f32x4_trunc(&mut self) -> Self::Output { - self.check_v128_funary_op() + fn visit_f32x4_max(&mut self) -> Self::Output { + self.check_v128_fbinary_op() } - fn visit_f32x4_nearest(&mut self) -> Self::Output { - self.check_v128_funary_op() + fn visit_f32x4_pmin(&mut self) -> Self::Output { + self.check_v128_fbinary_op() } - fn visit_f64x2_ceil(&mut self) -> Self::Output { - self.check_v128_funary_op() + fn visit_f32x4_pmax(&mut self) -> Self::Output { + self.check_v128_fbinary_op() } - fn visit_f64x2_floor(&mut self) -> Self::Output { - self.check_v128_funary_op() + fn visit_f64x2_add(&mut self) -> Self::Output { + self.check_v128_fbinary_op() } - fn visit_f64x2_trunc(&mut self) -> Self::Output { - self.check_v128_funary_op() + fn visit_f64x2_sub(&mut self) -> Self::Output { + self.check_v128_fbinary_op() } - fn visit_f64x2_nearest(&mut self) -> Self::Output { - self.check_v128_funary_op() + fn visit_f64x2_mul(&mut self) -> Self::Output { + self.check_v128_fbinary_op() } - fn visit_f32x4_abs(&mut self) -> Self::Output { - self.check_v128_funary_op() + fn visit_f64x2_div(&mut self) -> Self::Output { + self.check_v128_fbinary_op() } - fn visit_f32x4_neg(&mut self) -> Self::Output { - self.check_v128_funary_op() + fn visit_f64x2_min(&mut self) -> Self::Output { + self.check_v128_fbinary_op() } - fn visit_f32x4_sqrt(&mut self) -> Self::Output { - self.check_v128_funary_op() + fn visit_f64x2_max(&mut self) -> Self::Output { + 
self.check_v128_fbinary_op() } - fn visit_f64x2_abs(&mut self) -> Self::Output { - self.check_v128_funary_op() + fn visit_f64x2_pmin(&mut self) -> Self::Output { + self.check_v128_fbinary_op() } - fn visit_f64x2_neg(&mut self) -> Self::Output { - self.check_v128_funary_op() + fn visit_f64x2_pmax(&mut self) -> Self::Output { + self.check_v128_fbinary_op() } - fn visit_f64x2_sqrt(&mut self) -> Self::Output { - self.check_v128_funary_op() + fn visit_i8x16_eq(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_f32x4_demote_f64x2_zero(&mut self) -> Self::Output { - self.check_v128_funary_op() + fn visit_i8x16_ne(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_f64x2_promote_low_f32x4(&mut self) -> Self::Output { - self.check_v128_funary_op() + fn visit_i8x16_lt_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_f64x2_convert_low_i32x4_s(&mut self) -> Self::Output { - self.check_v128_funary_op() + fn visit_i8x16_lt_u(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_f64x2_convert_low_i32x4_u(&mut self) -> Self::Output { - self.check_v128_funary_op() + fn visit_i8x16_gt_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i32x4_trunc_sat_f32x4_s(&mut self) -> Self::Output { - self.check_v128_funary_op() + fn visit_i8x16_gt_u(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i32x4_trunc_sat_f32x4_u(&mut self) -> Self::Output { - self.check_v128_funary_op() + fn visit_i8x16_le_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i32x4_trunc_sat_f64x2_s_zero(&mut self) -> Self::Output { - self.check_v128_funary_op() + fn visit_i8x16_le_u(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i32x4_trunc_sat_f64x2_u_zero(&mut self) -> Self::Output { - self.check_v128_funary_op() + fn visit_i8x16_ge_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_f32x4_convert_i32x4_s(&mut self) -> 
Self::Output { - self.check_v128_funary_op() + fn visit_i8x16_ge_u(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_f32x4_convert_i32x4_u(&mut self) -> Self::Output { - self.check_v128_funary_op() + fn visit_i16x8_eq(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_v128_not(&mut self) -> Self::Output { - self.check_v128_unary_op() + fn visit_i16x8_ne(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i8x16_abs(&mut self) -> Self::Output { - self.check_v128_unary_op() + fn visit_i16x8_lt_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i8x16_neg(&mut self) -> Self::Output { - self.check_v128_unary_op() + fn visit_i16x8_lt_u(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i8x16_popcnt(&mut self) -> Self::Output { - self.check_v128_unary_op() + fn visit_i16x8_gt_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i16x8_abs(&mut self) -> Self::Output { - self.check_v128_unary_op() + fn visit_i16x8_gt_u(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i16x8_neg(&mut self) -> Self::Output { - self.check_v128_unary_op() + fn visit_i16x8_le_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i32x4_abs(&mut self) -> Self::Output { - self.check_v128_unary_op() + fn visit_i16x8_le_u(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i32x4_neg(&mut self) -> Self::Output { - self.check_v128_unary_op() + fn visit_i16x8_ge_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i64x2_abs(&mut self) -> Self::Output { - self.check_v128_unary_op() + fn visit_i16x8_ge_u(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i64x2_neg(&mut self) -> Self::Output { - self.check_v128_unary_op() + fn visit_i32x4_eq(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i16x8_extend_low_i8x16_s(&mut self) -> Self::Output { - 
self.check_v128_unary_op() + fn visit_i32x4_ne(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i16x8_extend_high_i8x16_s(&mut self) -> Self::Output { - self.check_v128_unary_op() + fn visit_i32x4_lt_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i16x8_extend_low_i8x16_u(&mut self) -> Self::Output { - self.check_v128_unary_op() + fn visit_i32x4_lt_u(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i16x8_extend_high_i8x16_u(&mut self) -> Self::Output { - self.check_v128_unary_op() + fn visit_i32x4_gt_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i32x4_extend_low_i16x8_s(&mut self) -> Self::Output { - self.check_v128_unary_op() + fn visit_i32x4_gt_u(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i32x4_extend_high_i16x8_s(&mut self) -> Self::Output { - self.check_v128_unary_op() + fn visit_i32x4_le_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i32x4_extend_low_i16x8_u(&mut self) -> Self::Output { - self.check_v128_unary_op() + fn visit_i32x4_le_u(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i32x4_extend_high_i16x8_u(&mut self) -> Self::Output { - self.check_v128_unary_op() + fn visit_i32x4_ge_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i64x2_extend_low_i32x4_s(&mut self) -> Self::Output { - self.check_v128_unary_op() + fn visit_i32x4_ge_u(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i64x2_extend_high_i32x4_s(&mut self) -> Self::Output { - self.check_v128_unary_op() + fn visit_i64x2_eq(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i64x2_extend_low_i32x4_u(&mut self) -> Self::Output { - self.check_v128_unary_op() + fn visit_i64x2_ne(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i64x2_extend_high_i32x4_u(&mut self) -> Self::Output { - self.check_v128_unary_op() + fn 
visit_i64x2_lt_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i16x8_extadd_pairwise_i8x16_s(&mut self) -> Self::Output { - self.check_v128_unary_op() + fn visit_i64x2_gt_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i16x8_extadd_pairwise_i8x16_u(&mut self) -> Self::Output { - self.check_v128_unary_op() + fn visit_i64x2_le_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i32x4_extadd_pairwise_i16x8_s(&mut self) -> Self::Output { - self.check_v128_unary_op() + fn visit_i64x2_ge_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i32x4_extadd_pairwise_i16x8_u(&mut self) -> Self::Output { - self.check_v128_unary_op() + fn visit_v128_and(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_v128_bitselect(&mut self) -> Self::Output { - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::V128)?; - Ok(()) + fn visit_v128_andnot(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i8x16_relaxed_swizzle(&mut self) -> Self::Output { - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::V128)?; - Ok(()) + fn visit_v128_or(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i32x4_relaxed_trunc_f32x4_s(&mut self) -> Self::Output { - self.check_v128_unary_op() + fn visit_v128_xor(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i32x4_relaxed_trunc_f32x4_u(&mut self) -> Self::Output { - self.check_v128_unary_op() + fn visit_i8x16_add(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i32x4_relaxed_trunc_f64x2_s_zero(&mut self) -> Self::Output { - self.check_v128_unary_op() + fn visit_i8x16_add_sat_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i32x4_relaxed_trunc_f64x2_u_zero(&mut self) -> 
Self::Output { - self.check_v128_unary_op() + fn visit_i8x16_add_sat_u(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_f32x4_relaxed_madd(&mut self) -> Self::Output { - self.check_v128_ternary_op() + fn visit_i8x16_sub(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_f32x4_relaxed_nmadd(&mut self) -> Self::Output { - self.check_v128_ternary_op() + fn visit_i8x16_sub_sat_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_f64x2_relaxed_madd(&mut self) -> Self::Output { - self.check_v128_ternary_op() + fn visit_i8x16_sub_sat_u(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_f64x2_relaxed_nmadd(&mut self) -> Self::Output { - self.check_v128_ternary_op() + fn visit_i8x16_min_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i8x16_relaxed_laneselect(&mut self) -> Self::Output { - self.check_v128_ternary_op() + fn visit_i8x16_min_u(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i16x8_relaxed_laneselect(&mut self) -> Self::Output { - self.check_v128_ternary_op() + fn visit_i8x16_max_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i32x4_relaxed_laneselect(&mut self) -> Self::Output { - self.check_v128_ternary_op() + fn visit_i8x16_max_u(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i64x2_relaxed_laneselect(&mut self) -> Self::Output { - self.check_v128_ternary_op() + fn visit_i16x8_add(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_f32x4_relaxed_min(&mut self) -> Self::Output { + fn visit_i16x8_add_sat_s(&mut self) -> Self::Output { self.check_v128_binary_op() } - fn visit_f32x4_relaxed_max(&mut self) -> Self::Output { + fn visit_i16x8_add_sat_u(&mut self) -> Self::Output { self.check_v128_binary_op() } - fn visit_f64x2_relaxed_min(&mut self) -> Self::Output { + fn visit_i16x8_sub(&mut self) -> Self::Output { self.check_v128_binary_op() } - fn 
visit_f64x2_relaxed_max(&mut self) -> Self::Output { + fn visit_i16x8_sub_sat_s(&mut self) -> Self::Output { self.check_v128_binary_op() } - fn visit_i16x8_relaxed_q15mulr_s(&mut self) -> Self::Output { + fn visit_i16x8_sub_sat_u(&mut self) -> Self::Output { self.check_v128_binary_op() } - fn visit_i16x8_relaxed_dot_i8x16_i7x16_s(&mut self) -> Self::Output { + fn visit_i16x8_mul(&mut self) -> Self::Output { self.check_v128_binary_op() } - fn visit_i32x4_relaxed_dot_i8x16_i7x16_add_s(&mut self) -> Self::Output { - self.check_v128_ternary_op() + fn visit_i16x8_min_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_v128_any_true(&mut self) -> Self::Output { - self.check_v128_bitmask_op() + fn visit_i16x8_min_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i16x8_max_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i8x16_all_true(&mut self) -> Self::Output { - self.check_v128_bitmask_op() + fn visit_i16x8_max_u(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i8x16_bitmask(&mut self) -> Self::Output { - self.check_v128_bitmask_op() + fn visit_i32x4_add(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i16x8_all_true(&mut self) -> Self::Output { - self.check_v128_bitmask_op() + fn visit_i32x4_sub(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i16x8_bitmask(&mut self) -> Self::Output { - self.check_v128_bitmask_op() + fn visit_i32x4_mul(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i32x4_all_true(&mut self) -> Self::Output { - self.check_v128_bitmask_op() + fn visit_i32x4_min_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i32x4_bitmask(&mut self) -> Self::Output { - self.check_v128_bitmask_op() + fn visit_i32x4_min_u(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i64x2_all_true(&mut self) -> Self::Output { - self.check_v128_bitmask_op() + fn 
visit_i32x4_max_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i64x2_bitmask(&mut self) -> Self::Output { - self.check_v128_bitmask_op() + fn visit_i32x4_max_u(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i8x16_shl(&mut self) -> Self::Output { - self.check_v128_shift_op() + fn visit_i32x4_dot_i16x8_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i8x16_shr_s(&mut self) -> Self::Output { - self.check_v128_shift_op() + fn visit_i64x2_add(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i8x16_shr_u(&mut self) -> Self::Output { - self.check_v128_shift_op() + fn visit_i64x2_sub(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i16x8_shl(&mut self) -> Self::Output { - self.check_v128_shift_op() + fn visit_i64x2_mul(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i16x8_shr_s(&mut self) -> Self::Output { - self.check_v128_shift_op() + fn visit_i8x16_avgr_u(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i16x8_shr_u(&mut self) -> Self::Output { - self.check_v128_shift_op() + fn visit_i16x8_avgr_u(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i32x4_shl(&mut self) -> Self::Output { - self.check_v128_shift_op() + fn visit_i8x16_narrow_i16x8_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i32x4_shr_s(&mut self) -> Self::Output { - self.check_v128_shift_op() + fn visit_i8x16_narrow_i16x8_u(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i32x4_shr_u(&mut self) -> Self::Output { - self.check_v128_shift_op() + fn visit_i16x8_narrow_i32x4_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i64x2_shl(&mut self) -> Self::Output { - self.check_v128_shift_op() + fn visit_i16x8_narrow_i32x4_u(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i64x2_shr_s(&mut self) -> Self::Output { - 
self.check_v128_shift_op() + fn visit_i16x8_extmul_low_i8x16_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i64x2_shr_u(&mut self) -> Self::Output { - self.check_v128_shift_op() + fn visit_i16x8_extmul_high_i8x16_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i8x16_swizzle(&mut self) -> Self::Output { - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::V128)?; - Ok(()) + fn visit_i16x8_extmul_low_i8x16_u(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_i8x16_shuffle(&mut self, lanes: [u8; 16]) -> Self::Output { - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(ValType::V128))?; - for i in lanes { - self.check_simd_lane_index(i, 32)?; - } - self.push_operand(ValType::V128)?; - Ok(()) + fn visit_i16x8_extmul_high_i8x16_u(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_v128_load8_splat(&mut self, memarg: MemArg) -> Self::Output { - let ty = self.check_memarg(memarg)?; - self.pop_operand(Some(ty))?; - self.push_operand(ValType::V128)?; - Ok(()) + fn visit_i32x4_extmul_low_i16x8_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_v128_load16_splat(&mut self, memarg: MemArg) -> Self::Output { - let ty = self.check_memarg(memarg)?; - self.pop_operand(Some(ty))?; - self.push_operand(ValType::V128)?; - Ok(()) + fn visit_i32x4_extmul_high_i16x8_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_v128_load32_splat(&mut self, memarg: MemArg) -> Self::Output { - let ty = self.check_memarg(memarg)?; - self.pop_operand(Some(ty))?; - self.push_operand(ValType::V128)?; - Ok(()) + fn visit_i32x4_extmul_low_i16x8_u(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_v128_load32_zero(&mut self, memarg: MemArg) -> Self::Output { - self.visit_v128_load32_splat(memarg) + fn visit_i32x4_extmul_high_i16x8_u(&mut self) -> Self::Output { + 
self.check_v128_binary_op() } - fn visit_v128_load64_splat(&mut self, memarg: MemArg) -> Self::Output { - self.check_v128_load_op(memarg) + fn visit_i64x2_extmul_low_i32x4_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_v128_load64_zero(&mut self, memarg: MemArg) -> Self::Output { - self.check_v128_load_op(memarg) + fn visit_i64x2_extmul_high_i32x4_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_v128_load8x8_s(&mut self, memarg: MemArg) -> Self::Output { - self.check_v128_load_op(memarg) + fn visit_i64x2_extmul_low_i32x4_u(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_v128_load8x8_u(&mut self, memarg: MemArg) -> Self::Output { - self.check_v128_load_op(memarg) + fn visit_i64x2_extmul_high_i32x4_u(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_v128_load16x4_s(&mut self, memarg: MemArg) -> Self::Output { - self.check_v128_load_op(memarg) + fn visit_i16x8_q15mulr_sat_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_v128_load16x4_u(&mut self, memarg: MemArg) -> Self::Output { - self.check_v128_load_op(memarg) + fn visit_f32x4_ceil(&mut self) -> Self::Output { + self.check_v128_funary_op() } - fn visit_v128_load32x2_s(&mut self, memarg: MemArg) -> Self::Output { - self.check_v128_load_op(memarg) + fn visit_f32x4_floor(&mut self) -> Self::Output { + self.check_v128_funary_op() } - fn visit_v128_load32x2_u(&mut self, memarg: MemArg) -> Self::Output { - self.check_v128_load_op(memarg) + fn visit_f32x4_trunc(&mut self) -> Self::Output { + self.check_v128_funary_op() } - fn visit_v128_load8_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { - let idx = self.check_memarg(memarg)?; - self.check_simd_lane_index(lane, 16)?; - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(idx))?; - self.push_operand(ValType::V128)?; - Ok(()) + fn visit_f32x4_nearest(&mut self) -> Self::Output { + self.check_v128_funary_op() } - fn 
visit_v128_load16_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { - let idx = self.check_memarg(memarg)?; - self.check_simd_lane_index(lane, 8)?; - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(idx))?; - self.push_operand(ValType::V128)?; - Ok(()) + fn visit_f64x2_ceil(&mut self) -> Self::Output { + self.check_v128_funary_op() } - fn visit_v128_load32_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { - let idx = self.check_memarg(memarg)?; - self.check_simd_lane_index(lane, 4)?; - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(idx))?; - self.push_operand(ValType::V128)?; - Ok(()) + fn visit_f64x2_floor(&mut self) -> Self::Output { + self.check_v128_funary_op() } - fn visit_v128_load64_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { - let idx = self.check_memarg(memarg)?; - self.check_simd_lane_index(lane, 2)?; - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(idx))?; - self.push_operand(ValType::V128)?; - Ok(()) + fn visit_f64x2_trunc(&mut self) -> Self::Output { + self.check_v128_funary_op() } - fn visit_v128_store8_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { - let idx = self.check_memarg(memarg)?; - self.check_simd_lane_index(lane, 16)?; - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(idx))?; - Ok(()) + fn visit_f64x2_nearest(&mut self) -> Self::Output { + self.check_v128_funary_op() } - fn visit_v128_store16_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { - let idx = self.check_memarg(memarg)?; - self.check_simd_lane_index(lane, 8)?; - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(idx))?; - Ok(()) + fn visit_f32x4_abs(&mut self) -> Self::Output { + self.check_v128_funary_op() } - fn visit_v128_store32_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { - let idx = self.check_memarg(memarg)?; - self.check_simd_lane_index(lane, 4)?; - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(idx))?; - 
Ok(()) + fn visit_f32x4_neg(&mut self) -> Self::Output { + self.check_v128_funary_op() } - fn visit_v128_store64_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { - let idx = self.check_memarg(memarg)?; - self.check_simd_lane_index(lane, 2)?; - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(idx))?; - Ok(()) + fn visit_f32x4_sqrt(&mut self) -> Self::Output { + self.check_v128_funary_op() } - fn visit_memory_init(&mut self, segment: u32, mem: u32) -> Self::Output { - let ty = self.check_memory_index(mem)?; - self.check_data_segment(segment)?; - self.pop_operand(Some(ValType::I32))?; - self.pop_operand(Some(ValType::I32))?; - self.pop_operand(Some(ty))?; - Ok(()) + fn visit_f64x2_abs(&mut self) -> Self::Output { + self.check_v128_funary_op() } - fn visit_data_drop(&mut self, segment: u32) -> Self::Output { - self.check_data_segment(segment)?; - Ok(()) + fn visit_f64x2_neg(&mut self) -> Self::Output { + self.check_v128_funary_op() } - fn visit_memory_copy(&mut self, dst: u32, src: u32) -> Self::Output { - let dst_ty = self.check_memory_index(dst)?; - let src_ty = self.check_memory_index(src)?; - - // The length operand here is the smaller of src/dst, which is - // i32 if one is i32 - self.pop_operand(Some(match src_ty { - ValType::I32 => ValType::I32, - _ => dst_ty, - }))?; - - // ... 
and the offset into each memory is required to be - // whatever the indexing type is for that memory - self.pop_operand(Some(src_ty))?; - self.pop_operand(Some(dst_ty))?; - Ok(()) + fn visit_f64x2_sqrt(&mut self) -> Self::Output { + self.check_v128_funary_op() } - fn visit_memory_fill(&mut self, mem: u32) -> Self::Output { - let ty = self.check_memory_index(mem)?; - self.pop_operand(Some(ty))?; - self.pop_operand(Some(ValType::I32))?; - self.pop_operand(Some(ty))?; - Ok(()) + fn visit_f32x4_demote_f64x2_zero(&mut self) -> Self::Output { + self.check_v128_funary_op() } - fn visit_memory_discard(&mut self, mem: u32) -> Self::Output { - let ty = self.check_memory_index(mem)?; - self.pop_operand(Some(ty))?; - self.pop_operand(Some(ty))?; - Ok(()) + fn visit_f64x2_promote_low_f32x4(&mut self) -> Self::Output { + self.check_v128_funary_op() } - fn visit_table_init(&mut self, segment: u32, table: u32) -> Self::Output { - let table = self.table_type_at(table)?; - let segment_ty = self.element_type_at(segment)?; - if !self - .resources - .is_subtype(ValType::Ref(segment_ty), ValType::Ref(table.element_type)) - { - bail!(self.offset, "type mismatch"); - } - self.pop_operand(Some(ValType::I32))?; - self.pop_operand(Some(ValType::I32))?; - self.pop_operand(Some(table.index_type()))?; - Ok(()) + fn visit_f64x2_convert_low_i32x4_s(&mut self) -> Self::Output { + self.check_v128_funary_op() } - fn visit_elem_drop(&mut self, segment: u32) -> Self::Output { - self.element_type_at(segment)?; - Ok(()) + fn visit_f64x2_convert_low_i32x4_u(&mut self) -> Self::Output { + self.check_v128_funary_op() } - fn visit_table_copy(&mut self, dst_table: u32, src_table: u32) -> Self::Output { - let src = self.table_type_at(src_table)?; - let dst = self.table_type_at(dst_table)?; - if !self.resources.is_subtype( - ValType::Ref(src.element_type), - ValType::Ref(dst.element_type), - ) { - bail!(self.offset, "type mismatch"); - } - - // The length operand here is the smaller of src/dst, which is - // 
i32 if one is i32 - self.pop_operand(Some(match src.index_type() { - ValType::I32 => ValType::I32, - _ => dst.index_type(), - }))?; - - // ... and the offset into each table is required to be - // whatever the indexing type is for that table - self.pop_operand(Some(src.index_type()))?; - self.pop_operand(Some(dst.index_type()))?; - Ok(()) + fn visit_i32x4_trunc_sat_f32x4_s(&mut self) -> Self::Output { + self.check_v128_funary_op() } - fn visit_table_get(&mut self, table: u32) -> Self::Output { - let table = self.table_type_at(table)?; - debug_assert_type_indices_are_ids(table.element_type.into()); - self.pop_operand(Some(table.index_type()))?; - self.push_operand(table.element_type)?; - Ok(()) + fn visit_i32x4_trunc_sat_f32x4_u(&mut self) -> Self::Output { + self.check_v128_funary_op() } - fn visit_table_atomic_get(&mut self, _ordering: Ordering, table: u32) -> Self::Output { - self.visit_table_get(table)?; - // No validation of `ordering` is needed because `table.atomic.get` can - // be used on both shared and unshared tables. But we do need to limit - // which types can be used with this instruction. 
- let ty = self.table_type_at(table)?.element_type; - let supertype = RefType::ANYREF.shared().unwrap(); - if !self.resources.is_subtype(ty.into(), supertype.into()) { - bail!( - self.offset, - "invalid type: `table.atomic.get` only allows subtypes of `anyref`" - ); - } - Ok(()) + fn visit_i32x4_trunc_sat_f64x2_s_zero(&mut self) -> Self::Output { + self.check_v128_funary_op() } - fn visit_table_set(&mut self, table: u32) -> Self::Output { - let table = self.table_type_at(table)?; - debug_assert_type_indices_are_ids(table.element_type.into()); - self.pop_operand(Some(table.element_type.into()))?; - self.pop_operand(Some(table.index_type()))?; - Ok(()) + fn visit_i32x4_trunc_sat_f64x2_u_zero(&mut self) -> Self::Output { + self.check_v128_funary_op() } - fn visit_table_atomic_set(&mut self, _ordering: Ordering, table: u32) -> Self::Output { - self.visit_table_set(table)?; - // No validation of `ordering` is needed because `table.atomic.set` can - // be used on both shared and unshared tables. But we do need to limit - // which types can be used with this instruction. 
- let ty = self.table_type_at(table)?.element_type; - let supertype = RefType::ANYREF.shared().unwrap(); - if !self.resources.is_subtype(ty.into(), supertype.into()) { - bail!( - self.offset, - "invalid type: `table.atomic.set` only allows subtypes of `anyref`" - ); - } - Ok(()) + fn visit_f32x4_convert_i32x4_s(&mut self) -> Self::Output { + self.check_v128_funary_op() } - fn visit_table_grow(&mut self, table: u32) -> Self::Output { - let table = self.table_type_at(table)?; - debug_assert_type_indices_are_ids(table.element_type.into()); - self.pop_operand(Some(table.index_type()))?; - self.pop_operand(Some(table.element_type.into()))?; - self.push_operand(table.index_type())?; - Ok(()) + fn visit_f32x4_convert_i32x4_u(&mut self) -> Self::Output { + self.check_v128_funary_op() } - fn visit_table_size(&mut self, table: u32) -> Self::Output { - let table = self.table_type_at(table)?; - self.push_operand(table.index_type())?; - Ok(()) + fn visit_v128_not(&mut self) -> Self::Output { + self.check_v128_unary_op() } - fn visit_table_fill(&mut self, table: u32) -> Self::Output { - let table = self.table_type_at(table)?; - debug_assert_type_indices_are_ids(table.element_type.into()); - self.pop_operand(Some(table.index_type()))?; - self.pop_operand(Some(table.element_type.into()))?; - self.pop_operand(Some(table.index_type()))?; - Ok(()) + fn visit_i8x16_abs(&mut self) -> Self::Output { + self.check_v128_unary_op() } - fn visit_table_atomic_rmw_xchg(&mut self, _ordering: Ordering, table: u32) -> Self::Output { - let table = self.table_type_at(table)?; - let elem_ty = table.element_type.into(); - debug_assert_type_indices_are_ids(elem_ty); - let supertype = RefType::ANYREF.shared().unwrap(); - if !self.resources.is_subtype(elem_ty, supertype.into()) { - bail!( - self.offset, - "invalid type: `table.atomic.rmw.xchg` only allows subtypes of `anyref`" - ); - } - self.pop_operand(Some(elem_ty))?; - self.pop_operand(Some(table.index_type()))?; - self.push_operand(elem_ty)?; - 
Ok(()) + fn visit_i8x16_neg(&mut self) -> Self::Output { + self.check_v128_unary_op() } - fn visit_table_atomic_rmw_cmpxchg(&mut self, _ordering: Ordering, table: u32) -> Self::Output { - let table = self.table_type_at(table)?; - let elem_ty = table.element_type.into(); - debug_assert_type_indices_are_ids(elem_ty); - let supertype = RefType::EQREF.shared().unwrap(); - if !self.resources.is_subtype(elem_ty, supertype.into()) { - bail!( - self.offset, - "invalid type: `table.atomic.rmw.cmpxchg` only allows subtypes of `eqref`" - ); - } - self.pop_operand(Some(elem_ty))?; - self.pop_operand(Some(elem_ty))?; - self.pop_operand(Some(table.index_type()))?; - self.push_operand(elem_ty)?; - Ok(()) + fn visit_i8x16_popcnt(&mut self) -> Self::Output { + self.check_v128_unary_op() } - fn visit_struct_new(&mut self, struct_type_index: u32) -> Self::Output { - let struct_ty = self.struct_type_at(struct_type_index)?; - for ty in struct_ty.fields.iter().rev() { - self.pop_operand(Some(ty.element_type.unpack()))?; - } - self.push_concrete_ref(false, struct_type_index)?; - Ok(()) + fn visit_i16x8_abs(&mut self) -> Self::Output { + self.check_v128_unary_op() } - fn visit_struct_new_default(&mut self, type_index: u32) -> Self::Output { - let ty = self.struct_type_at(type_index)?; - for field in ty.fields.iter() { - let val_ty = field.element_type.unpack(); - if !val_ty.is_defaultable() { - bail!( - self.offset, - "invalid `struct.new_default`: {val_ty} field is not defaultable" - ); - } - } - self.push_concrete_ref(false, type_index)?; - Ok(()) + fn visit_i16x8_neg(&mut self) -> Self::Output { + self.check_v128_unary_op() } - fn visit_struct_get(&mut self, struct_type_index: u32, field_index: u32) -> Self::Output { - let field_ty = self.struct_field_at(struct_type_index, field_index)?; - if field_ty.element_type.is_packed() { - bail!( - self.offset, - "can only use struct `get` with non-packed storage types" - ) - } - self.pop_concrete_ref(true, struct_type_index)?; - 
self.push_operand(field_ty.element_type.unpack()) + fn visit_i32x4_abs(&mut self) -> Self::Output { + self.check_v128_unary_op() } - fn visit_struct_atomic_get( - &mut self, - _ordering: Ordering, - struct_type_index: u32, - field_index: u32, - ) -> Self::Output { - self.visit_struct_get(struct_type_index, field_index)?; - // The `atomic` version has some additional type restrictions. - let ty = self - .struct_field_at(struct_type_index, field_index)? - .element_type; - let is_valid_type = match ty { - StorageType::Val(ValType::I32) | StorageType::Val(ValType::I64) => true, - StorageType::Val(v) => self - .resources - .is_subtype(v, RefType::ANYREF.shared().unwrap().into()), - _ => false, - }; - if !is_valid_type { - bail!( - self.offset, - "invalid type: `struct.atomic.get` only allows `i32`, `i64` and subtypes of `anyref`" - ); - } - Ok(()) + fn visit_i32x4_neg(&mut self) -> Self::Output { + self.check_v128_unary_op() } - fn visit_struct_get_s(&mut self, struct_type_index: u32, field_index: u32) -> Self::Output { - let field_ty = self.struct_field_at(struct_type_index, field_index)?; - if !field_ty.element_type.is_packed() { - bail!( - self.offset, - "cannot use struct.get_s with non-packed storage types" - ) - } - self.pop_concrete_ref(true, struct_type_index)?; - self.push_operand(field_ty.element_type.unpack()) + fn visit_i64x2_abs(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i64x2_neg(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i16x8_extend_low_i8x16_s(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i16x8_extend_high_i8x16_s(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i16x8_extend_low_i8x16_u(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i16x8_extend_high_i8x16_u(&mut self) -> Self::Output { + self.check_v128_unary_op() } - fn visit_struct_atomic_get_s( - &mut self, - _ordering: Ordering, - struct_type_index: u32, - 
field_index: u32, - ) -> Self::Output { - self.visit_struct_get_s(struct_type_index, field_index)?; - // This instruction has the same type restrictions as the non-`atomic` version. - debug_assert!(matches!( - self.struct_field_at(struct_type_index, field_index)? - .element_type, - StorageType::I8 | StorageType::I16 - )); - Ok(()) + fn visit_i32x4_extend_low_i16x8_s(&mut self) -> Self::Output { + self.check_v128_unary_op() } - fn visit_struct_get_u(&mut self, struct_type_index: u32, field_index: u32) -> Self::Output { - let field_ty = self.struct_field_at(struct_type_index, field_index)?; - if !field_ty.element_type.is_packed() { - bail!( - self.offset, - "cannot use struct.get_u with non-packed storage types" - ) - } - self.pop_concrete_ref(true, struct_type_index)?; - self.push_operand(field_ty.element_type.unpack()) + fn visit_i32x4_extend_high_i16x8_s(&mut self) -> Self::Output { + self.check_v128_unary_op() } - fn visit_struct_atomic_get_u( - &mut self, - _ordering: Ordering, - struct_type_index: u32, - field_index: u32, - ) -> Self::Output { - self.visit_struct_get_s(struct_type_index, field_index)?; - // This instruction has the same type restrictions as the non-`atomic` version. - debug_assert!(matches!( - self.struct_field_at(struct_type_index, field_index)? 
- .element_type, - StorageType::I8 | StorageType::I16 - )); - Ok(()) + fn visit_i32x4_extend_low_i16x8_u(&mut self) -> Self::Output { + self.check_v128_unary_op() } - fn visit_struct_set(&mut self, struct_type_index: u32, field_index: u32) -> Self::Output { - let field_ty = self.mutable_struct_field_at(struct_type_index, field_index)?; - self.pop_operand(Some(field_ty.element_type.unpack()))?; - self.pop_concrete_ref(true, struct_type_index)?; - Ok(()) + fn visit_i32x4_extend_high_i16x8_u(&mut self) -> Self::Output { + self.check_v128_unary_op() } - fn visit_struct_atomic_set( - &mut self, - _ordering: Ordering, - struct_type_index: u32, - field_index: u32, - ) -> Self::Output { - self.visit_struct_set(struct_type_index, field_index)?; - // The `atomic` version has some additional type restrictions. - let ty = self - .struct_field_at(struct_type_index, field_index)? - .element_type; - let is_valid_type = match ty { - StorageType::I8 | StorageType::I16 => true, - StorageType::Val(ValType::I32) | StorageType::Val(ValType::I64) => true, - StorageType::Val(v) => self - .resources - .is_subtype(v, RefType::ANYREF.shared().unwrap().into()), - }; - if !is_valid_type { - bail!( - self.offset, - "invalid type: `struct.atomic.set` only allows `i8`, `i16`, `i32`, `i64` and subtypes of `anyref`" - ); - } - Ok(()) + fn visit_i64x2_extend_low_i32x4_s(&mut self) -> Self::Output { + self.check_v128_unary_op() } - fn visit_struct_atomic_rmw_add( - &mut self, - _ordering: Ordering, - struct_type_index: u32, - field_index: u32, - ) -> Self::Output { - self.check_struct_atomic_rmw("add", struct_type_index, field_index) + fn visit_i64x2_extend_high_i32x4_s(&mut self) -> Self::Output { + self.check_v128_unary_op() } - fn visit_struct_atomic_rmw_sub( - &mut self, - _ordering: Ordering, - struct_type_index: u32, - field_index: u32, - ) -> Self::Output { - self.check_struct_atomic_rmw("sub", struct_type_index, field_index) + fn visit_i64x2_extend_low_i32x4_u(&mut self) -> Self::Output { + 
self.check_v128_unary_op() } - fn visit_struct_atomic_rmw_and( - &mut self, - _ordering: Ordering, - struct_type_index: u32, - field_index: u32, - ) -> Self::Output { - self.check_struct_atomic_rmw("and", struct_type_index, field_index) + fn visit_i64x2_extend_high_i32x4_u(&mut self) -> Self::Output { + self.check_v128_unary_op() } - fn visit_struct_atomic_rmw_or( - &mut self, - _ordering: Ordering, - struct_type_index: u32, - field_index: u32, - ) -> Self::Output { - self.check_struct_atomic_rmw("or", struct_type_index, field_index) + fn visit_i16x8_extadd_pairwise_i8x16_s(&mut self) -> Self::Output { + self.check_v128_unary_op() } - fn visit_struct_atomic_rmw_xor( - &mut self, - _ordering: Ordering, - struct_type_index: u32, - field_index: u32, - ) -> Self::Output { - self.check_struct_atomic_rmw("xor", struct_type_index, field_index) + fn visit_i16x8_extadd_pairwise_i8x16_u(&mut self) -> Self::Output { + self.check_v128_unary_op() } - fn visit_struct_atomic_rmw_xchg( - &mut self, - _ordering: Ordering, - struct_type_index: u32, - field_index: u32, - ) -> Self::Output { - let field = self.mutable_struct_field_at(struct_type_index, field_index)?; - let is_valid_type = match field.element_type { - StorageType::Val(ValType::I32) | StorageType::Val(ValType::I64) => true, - StorageType::Val(v) => self - .resources - .is_subtype(v, RefType::ANYREF.shared().unwrap().into()), - _ => false, - }; - if !is_valid_type { - bail!( - self.offset, - "invalid type: `struct.atomic.rmw.xchg` only allows `i32`, `i64` and subtypes of `anyref`" - ); - } - let field_ty = field.element_type.unpack(); - self.pop_operand(Some(field_ty))?; - self.pop_concrete_ref(true, struct_type_index)?; - self.push_operand(field_ty)?; + fn visit_i32x4_extadd_pairwise_i16x8_s(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i32x4_extadd_pairwise_i16x8_u(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_v128_bitselect(&mut self) -> Self::Output { + 
self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::V128)?; Ok(()) } - fn visit_struct_atomic_rmw_cmpxchg( - &mut self, - _ordering: Ordering, - struct_type_index: u32, - field_index: u32, - ) -> Self::Output { - let field = self.mutable_struct_field_at(struct_type_index, field_index)?; - let is_valid_type = match field.element_type { - StorageType::Val(ValType::I32) | StorageType::Val(ValType::I64) => true, - StorageType::Val(v) => self - .resources - .is_subtype(v, RefType::EQREF.shared().unwrap().into()), - _ => false, - }; - if !is_valid_type { - bail!( - self.offset, - "invalid type: `struct.atomic.rmw.cmpxchg` only allows `i32`, `i64` and subtypes of `eqref`" - ); - } - let field_ty = field.element_type.unpack(); - self.pop_operand(Some(field_ty))?; - self.pop_operand(Some(field_ty))?; - self.pop_concrete_ref(true, struct_type_index)?; - self.push_operand(field_ty)?; + fn visit_i8x16_relaxed_swizzle(&mut self) -> Self::Output { + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::V128)?; Ok(()) } - fn visit_array_new(&mut self, type_index: u32) -> Self::Output { - let array_ty = self.array_type_at(type_index)?; - self.pop_operand(Some(ValType::I32))?; - self.pop_operand(Some(array_ty.element_type.unpack()))?; - self.push_concrete_ref(false, type_index) + fn visit_i32x4_relaxed_trunc_f32x4_s(&mut self) -> Self::Output { + self.check_v128_unary_op() } - fn visit_array_new_default(&mut self, type_index: u32) -> Self::Output { - let ty = self.array_type_at(type_index)?; - let val_ty = ty.element_type.unpack(); - if !val_ty.is_defaultable() { - bail!( - self.offset, - "invalid `array.new_default`: {val_ty} field is not defaultable" - ); - } - self.pop_operand(Some(ValType::I32))?; - self.push_concrete_ref(false, type_index) + fn visit_i32x4_relaxed_trunc_f32x4_u(&mut self) -> Self::Output { + 
self.check_v128_unary_op() } - fn visit_array_new_fixed(&mut self, type_index: u32, n: u32) -> Self::Output { - let array_ty = self.array_type_at(type_index)?; - let elem_ty = array_ty.element_type.unpack(); - for _ in 0..n { - self.pop_operand(Some(elem_ty))?; - } - self.push_concrete_ref(false, type_index) + fn visit_i32x4_relaxed_trunc_f64x2_s_zero(&mut self) -> Self::Output { + self.check_v128_unary_op() } - fn visit_array_new_data(&mut self, type_index: u32, data_index: u32) -> Self::Output { - let array_ty = self.array_type_at(type_index)?; - let elem_ty = array_ty.element_type.unpack(); - match elem_ty { - ValType::I32 | ValType::I64 | ValType::F32 | ValType::F64 | ValType::V128 => {} - ValType::Ref(_) => bail!( - self.offset, - "type mismatch: array.new_data can only create arrays with numeric and vector elements" - ), - } - self.check_data_segment(data_index)?; - self.pop_operand(Some(ValType::I32))?; - self.pop_operand(Some(ValType::I32))?; - self.push_concrete_ref(false, type_index) + fn visit_i32x4_relaxed_trunc_f64x2_u_zero(&mut self) -> Self::Output { + self.check_v128_unary_op() } - fn visit_array_new_elem(&mut self, type_index: u32, elem_index: u32) -> Self::Output { - let array_ty = self.array_type_at(type_index)?; - let array_ref_ty = match array_ty.element_type.unpack() { - ValType::Ref(rt) => rt, - ValType::I32 | ValType::I64 | ValType::F32 | ValType::F64 | ValType::V128 => bail!( - self.offset, - "type mismatch: array.new_elem can only create arrays with reference elements" - ), - }; - let elem_ref_ty = self.element_type_at(elem_index)?; - if !self - .resources - .is_subtype(elem_ref_ty.into(), array_ref_ty.into()) - { - bail!( - self.offset, - "invalid array.new_elem instruction: element segment {elem_index} type mismatch: \ - expected {array_ref_ty}, found {elem_ref_ty}" - ) - } - self.pop_operand(Some(ValType::I32))?; - self.pop_operand(Some(ValType::I32))?; - self.push_concrete_ref(false, type_index) + fn visit_f32x4_relaxed_madd(&mut self) 
-> Self::Output { + self.check_v128_ternary_op() } - fn visit_array_get(&mut self, type_index: u32) -> Self::Output { - let array_ty = self.array_type_at(type_index)?; - let elem_ty = array_ty.element_type; - if elem_ty.is_packed() { - bail!( - self.offset, - "cannot use array.get with packed storage types" - ) - } - self.pop_operand(Some(ValType::I32))?; - self.pop_concrete_ref(true, type_index)?; - self.push_operand(elem_ty.unpack()) + fn visit_f32x4_relaxed_nmadd(&mut self) -> Self::Output { + self.check_v128_ternary_op() } - fn visit_array_atomic_get(&mut self, _ordering: Ordering, type_index: u32) -> Self::Output { - self.visit_array_get(type_index)?; - // The `atomic` version has some additional type restrictions. - let elem_ty = self.array_type_at(type_index)?.element_type; - let is_valid_type = match elem_ty { - StorageType::Val(ValType::I32) | StorageType::Val(ValType::I64) => true, - StorageType::Val(v) => self - .resources - .is_subtype(v, RefType::ANYREF.shared().unwrap().into()), - _ => false, - }; - if !is_valid_type { - bail!( - self.offset, - "invalid type: `array.atomic.get` only allows `i32`, `i64` and subtypes of `anyref`" - ); - } - Ok(()) + fn visit_f64x2_relaxed_madd(&mut self) -> Self::Output { + self.check_v128_ternary_op() } - fn visit_array_get_s(&mut self, type_index: u32) -> Self::Output { - let array_ty = self.array_type_at(type_index)?; - let elem_ty = array_ty.element_type; - if !elem_ty.is_packed() { - bail!( - self.offset, - "cannot use array.get_s with non-packed storage types" - ) - } - self.pop_operand(Some(ValType::I32))?; - self.pop_concrete_ref(true, type_index)?; - self.push_operand(elem_ty.unpack()) + fn visit_f64x2_relaxed_nmadd(&mut self) -> Self::Output { + self.check_v128_ternary_op() } - fn visit_array_atomic_get_s(&mut self, _ordering: Ordering, type_index: u32) -> Self::Output { - self.visit_array_get_s(type_index)?; - // This instruction has the same type restrictions as the non-`atomic` version. 
- debug_assert!(matches!( - self.array_type_at(type_index)?.element_type, - StorageType::I8 | StorageType::I16 - )); - Ok(()) + fn visit_i8x16_relaxed_laneselect(&mut self) -> Self::Output { + self.check_v128_ternary_op() } - fn visit_array_get_u(&mut self, type_index: u32) -> Self::Output { - let array_ty = self.array_type_at(type_index)?; - let elem_ty = array_ty.element_type; - if !elem_ty.is_packed() { - bail!( - self.offset, - "cannot use array.get_u with non-packed storage types" - ) - } - self.pop_operand(Some(ValType::I32))?; - self.pop_concrete_ref(true, type_index)?; - self.push_operand(elem_ty.unpack()) + fn visit_i16x8_relaxed_laneselect(&mut self) -> Self::Output { + self.check_v128_ternary_op() } - fn visit_array_atomic_get_u(&mut self, _ordering: Ordering, type_index: u32) -> Self::Output { - self.visit_array_get_u(type_index)?; - // This instruction has the same type restrictions as the non-`atomic` version. - debug_assert!(matches!( - self.array_type_at(type_index)?.element_type, - StorageType::I8 | StorageType::I16 - )); - Ok(()) + fn visit_i32x4_relaxed_laneselect(&mut self) -> Self::Output { + self.check_v128_ternary_op() } - fn visit_array_set(&mut self, type_index: u32) -> Self::Output { - let array_ty = self.mutable_array_type_at(type_index)?; - self.pop_operand(Some(array_ty.element_type.unpack()))?; - self.pop_operand(Some(ValType::I32))?; - self.pop_concrete_ref(true, type_index)?; - Ok(()) + fn visit_i64x2_relaxed_laneselect(&mut self) -> Self::Output { + self.check_v128_ternary_op() } - fn visit_array_atomic_set(&mut self, _ordering: Ordering, type_index: u32) -> Self::Output { - self.visit_array_set(type_index)?; - // The `atomic` version has some additional type restrictions. 
- let elem_ty = self.array_type_at(type_index)?.element_type; - let is_valid_type = match elem_ty { - StorageType::I8 | StorageType::I16 => true, - StorageType::Val(ValType::I32) | StorageType::Val(ValType::I64) => true, - StorageType::Val(v) => self - .resources - .is_subtype(v, RefType::ANYREF.shared().unwrap().into()), - }; - if !is_valid_type { - bail!( - self.offset, - "invalid type: `array.atomic.set` only allows `i8`, `i16`, `i32`, `i64` and subtypes of `anyref`" - ); - } - Ok(()) + fn visit_f32x4_relaxed_min(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_array_len(&mut self) -> Self::Output { - self.pop_maybe_shared_ref(AbstractHeapType::Array)?; - self.push_operand(ValType::I32) + fn visit_f32x4_relaxed_max(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_array_fill(&mut self, array_type_index: u32) -> Self::Output { - let array_ty = self.mutable_array_type_at(array_type_index)?; - self.pop_operand(Some(ValType::I32))?; - self.pop_operand(Some(array_ty.element_type.unpack()))?; - self.pop_operand(Some(ValType::I32))?; - self.pop_concrete_ref(true, array_type_index)?; - Ok(()) + fn visit_f64x2_relaxed_min(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_array_copy(&mut self, type_index_dst: u32, type_index_src: u32) -> Self::Output { - let array_ty_dst = self.mutable_array_type_at(type_index_dst)?; - let array_ty_src = self.array_type_at(type_index_src)?; - match (array_ty_dst.element_type, array_ty_src.element_type) { - (StorageType::I8, StorageType::I8) => {} - (StorageType::I8, ty) => bail!( - self.offset, - "array types do not match: expected i8, found {ty}" - ), - (StorageType::I16, StorageType::I16) => {} - (StorageType::I16, ty) => bail!( - self.offset, - "array types do not match: expected i16, found {ty}" - ), - (StorageType::Val(dst), StorageType::Val(src)) => { - if !self.resources.is_subtype(src, dst) { - bail!( - self.offset, - "array types do not match: expected {dst}, 
found {src}" - ) - } - } - (StorageType::Val(dst), src) => { - bail!( - self.offset, - "array types do not match: expected {dst}, found {src}" - ) - } - } - self.pop_operand(Some(ValType::I32))?; - self.pop_operand(Some(ValType::I32))?; - self.pop_concrete_ref(true, type_index_src)?; - self.pop_operand(Some(ValType::I32))?; - self.pop_concrete_ref(true, type_index_dst)?; - Ok(()) + fn visit_f64x2_relaxed_max(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_array_init_data( - &mut self, - array_type_index: u32, - array_data_index: u32, - ) -> Self::Output { - let array_ty = self.mutable_array_type_at(array_type_index)?; - let val_ty = array_ty.element_type.unpack(); - match val_ty { - ValType::I32 | ValType::I64 | ValType::F32 | ValType::F64 | ValType::V128 => {} - ValType::Ref(_) => bail!( - self.offset, - "invalid array.init_data: array type is not numeric or vector" - ), - } - self.check_data_segment(array_data_index)?; - self.pop_operand(Some(ValType::I32))?; - self.pop_operand(Some(ValType::I32))?; - self.pop_operand(Some(ValType::I32))?; - self.pop_concrete_ref(true, array_type_index)?; - Ok(()) + fn visit_i16x8_relaxed_q15mulr_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_array_init_elem(&mut self, type_index: u32, elem_index: u32) -> Self::Output { - let array_ty = self.mutable_array_type_at(type_index)?; - let array_ref_ty = match array_ty.element_type.unpack() { - ValType::Ref(rt) => rt, - ValType::I32 | ValType::I64 | ValType::F32 | ValType::F64 | ValType::V128 => bail!( - self.offset, - "type mismatch: array.init_elem can only create arrays with reference elements" - ), - }; - let elem_ref_ty = self.element_type_at(elem_index)?; - if !self - .resources - .is_subtype(elem_ref_ty.into(), array_ref_ty.into()) - { - bail!( - self.offset, - "invalid array.init_elem instruction: element segment {elem_index} type mismatch: \ - expected {array_ref_ty}, found {elem_ref_ty}" - ) - } - 
self.pop_operand(Some(ValType::I32))?; - self.pop_operand(Some(ValType::I32))?; - self.pop_operand(Some(ValType::I32))?; - self.pop_concrete_ref(true, type_index)?; - Ok(()) + fn visit_i16x8_relaxed_dot_i8x16_i7x16_s(&mut self) -> Self::Output { + self.check_v128_binary_op() } - fn visit_array_atomic_rmw_add(&mut self, _ordering: Ordering, type_index: u32) -> Self::Output { - self.check_array_atomic_rmw("add", type_index) + fn visit_i32x4_relaxed_dot_i8x16_i7x16_add_s(&mut self) -> Self::Output { + self.check_v128_ternary_op() } - fn visit_array_atomic_rmw_sub(&mut self, _ordering: Ordering, type_index: u32) -> Self::Output { - self.check_array_atomic_rmw("sub", type_index) + fn visit_v128_any_true(&mut self) -> Self::Output { + self.check_v128_bitmask_op() } - fn visit_array_atomic_rmw_and(&mut self, _ordering: Ordering, type_index: u32) -> Self::Output { - self.check_array_atomic_rmw("and", type_index) + fn visit_i8x16_all_true(&mut self) -> Self::Output { + self.check_v128_bitmask_op() } - fn visit_array_atomic_rmw_or(&mut self, _ordering: Ordering, type_index: u32) -> Self::Output { - self.check_array_atomic_rmw("or", type_index) + fn visit_i8x16_bitmask(&mut self) -> Self::Output { + self.check_v128_bitmask_op() } - fn visit_array_atomic_rmw_xor(&mut self, _ordering: Ordering, type_index: u32) -> Self::Output { - self.check_array_atomic_rmw("xor", type_index) + fn visit_i16x8_all_true(&mut self) -> Self::Output { + self.check_v128_bitmask_op() } - fn visit_array_atomic_rmw_xchg( - &mut self, - _ordering: Ordering, - type_index: u32, - ) -> Self::Output { - let field = self.mutable_array_type_at(type_index)?; - let is_valid_type = match field.element_type { - StorageType::Val(ValType::I32) | StorageType::Val(ValType::I64) => true, - StorageType::Val(v) => self - .resources - .is_subtype(v, RefType::ANYREF.shared().unwrap().into()), - _ => false, - }; - if !is_valid_type { - bail!( - self.offset, - "invalid type: `array.atomic.rmw.xchg` only allows `i32`, `i64` 
and subtypes of `anyref`" - ); - } - let elem_ty = field.element_type.unpack(); - self.pop_operand(Some(elem_ty))?; - self.pop_operand(Some(ValType::I32))?; - self.pop_concrete_ref(true, type_index)?; - self.push_operand(elem_ty)?; - Ok(()) + fn visit_i16x8_bitmask(&mut self) -> Self::Output { + self.check_v128_bitmask_op() } - fn visit_array_atomic_rmw_cmpxchg( - &mut self, - _ordering: Ordering, - type_index: u32, - ) -> Self::Output { - let field = self.mutable_array_type_at(type_index)?; - let is_valid_type = match field.element_type { - StorageType::Val(ValType::I32) | StorageType::Val(ValType::I64) => true, - StorageType::Val(v) => self - .resources - .is_subtype(v, RefType::EQREF.shared().unwrap().into()), - _ => false, - }; - if !is_valid_type { - bail!( - self.offset, - "invalid type: `array.atomic.rmw.cmpxchg` only allows `i32`, `i64` and subtypes of `eqref`" - ); - } - let elem_ty = field.element_type.unpack(); - self.pop_operand(Some(elem_ty))?; - self.pop_operand(Some(elem_ty))?; - self.pop_operand(Some(ValType::I32))?; - self.pop_concrete_ref(true, type_index)?; - self.push_operand(elem_ty)?; - Ok(()) + fn visit_i32x4_all_true(&mut self) -> Self::Output { + self.check_v128_bitmask_op() } - fn visit_any_convert_extern(&mut self) -> Self::Output { - let any_ref = match self.pop_maybe_shared_ref(AbstractHeapType::Extern)? { - MaybeType::Bottom | MaybeType::UnknownRef(_) => { - MaybeType::UnknownRef(Some(AbstractHeapType::Any)) - } - MaybeType::Known(ty) => { - let shared = self.resources.is_shared(ty); - let heap_type = HeapType::Abstract { - shared, - ty: AbstractHeapType::Any, - }; - let any_ref = RefType::new(ty.is_nullable(), heap_type).unwrap(); - MaybeType::Known(any_ref) - } - }; - self.push_operand(any_ref) + fn visit_i32x4_bitmask(&mut self) -> Self::Output { + self.check_v128_bitmask_op() } - fn visit_extern_convert_any(&mut self) -> Self::Output { - let extern_ref = match self.pop_maybe_shared_ref(AbstractHeapType::Any)? 
{ - MaybeType::Bottom | MaybeType::UnknownRef(_) => { - MaybeType::UnknownRef(Some(AbstractHeapType::Extern)) - } - MaybeType::Known(ty) => { - let shared = self.resources.is_shared(ty); - let heap_type = HeapType::Abstract { - shared, - ty: AbstractHeapType::Extern, - }; - let extern_ref = RefType::new(ty.is_nullable(), heap_type).unwrap(); - MaybeType::Known(extern_ref) - } - }; - self.push_operand(extern_ref) + fn visit_i64x2_all_true(&mut self) -> Self::Output { + self.check_v128_bitmask_op() } - fn visit_ref_test_non_null(&mut self, heap_type: HeapType) -> Self::Output { - self.check_ref_test(false, heap_type) + fn visit_i64x2_bitmask(&mut self) -> Self::Output { + self.check_v128_bitmask_op() } - fn visit_ref_test_nullable(&mut self, heap_type: HeapType) -> Self::Output { - self.check_ref_test(true, heap_type) + fn visit_i8x16_shl(&mut self) -> Self::Output { + self.check_v128_shift_op() } - fn visit_ref_cast_non_null(&mut self, heap_type: HeapType) -> Self::Output { - self.check_ref_cast(false, heap_type) + fn visit_i8x16_shr_s(&mut self) -> Self::Output { + self.check_v128_shift_op() } - fn visit_ref_cast_nullable(&mut self, heap_type: HeapType) -> Self::Output { - self.check_ref_cast(true, heap_type) + fn visit_i8x16_shr_u(&mut self) -> Self::Output { + self.check_v128_shift_op() } - fn visit_br_on_cast( - &mut self, - relative_depth: u32, - mut from_ref_type: RefType, - mut to_ref_type: RefType, - ) -> Self::Output { - self.resources - .check_ref_type(&mut from_ref_type, self.offset)?; - self.resources - .check_ref_type(&mut to_ref_type, self.offset)?; - - if !self - .resources - .is_subtype(to_ref_type.into(), from_ref_type.into()) - { - bail!( - self.offset, - "type mismatch: expected {from_ref_type}, found {to_ref_type}" - ); - } - - let (block_ty, frame_kind) = self.jump(relative_depth)?; - let mut label_types = self.label_types(block_ty, frame_kind)?; - - match label_types.next_back() { - Some(label_ty) if 
self.resources.is_subtype(to_ref_type.into(), label_ty) => { - self.pop_operand(Some(from_ref_type.into()))?; - } - Some(label_ty) => bail!( - self.offset, - "type mismatch: casting to type {to_ref_type}, but it does not match \ - label result type {label_ty}" - ), - None => bail!( - self.offset, - "type mismatch: br_on_cast to label with empty types, must have a reference type" - ), - }; - - self.pop_push_label_types(label_types)?; - let diff_ty = RefType::difference(from_ref_type, to_ref_type); - self.push_operand(diff_ty)?; - Ok(()) + fn visit_i16x8_shl(&mut self) -> Self::Output { + self.check_v128_shift_op() } - fn visit_br_on_cast_fail( - &mut self, - relative_depth: u32, - mut from_ref_type: RefType, - mut to_ref_type: RefType, - ) -> Self::Output { - self.resources - .check_ref_type(&mut from_ref_type, self.offset)?; - self.resources - .check_ref_type(&mut to_ref_type, self.offset)?; - - if !self - .resources - .is_subtype(to_ref_type.into(), from_ref_type.into()) - { - bail!( - self.offset, - "type mismatch: expected {from_ref_type}, found {to_ref_type}" - ); - } - - let (block_ty, frame_kind) = self.jump(relative_depth)?; - let mut label_tys = self.label_types(block_ty, frame_kind)?; - - let diff_ty = RefType::difference(from_ref_type, to_ref_type); - match label_tys.next_back() { - Some(label_ty) if self.resources.is_subtype(diff_ty.into(), label_ty) => { - self.pop_operand(Some(from_ref_type.into()))?; - } - Some(label_ty) => bail!( - self.offset, - "type mismatch: expected label result type {label_ty}, found {diff_ty}" - ), - None => bail!( - self.offset, - "type mismatch: expected a reference type, found nothing" - ), - } - - self.pop_push_label_types(label_tys)?; - self.push_operand(to_ref_type)?; - Ok(()) + fn visit_i16x8_shr_s(&mut self) -> Self::Output { + self.check_v128_shift_op() } - fn visit_ref_i31(&mut self) -> Self::Output { - self.pop_operand(Some(ValType::I32))?; - self.push_operand(ValType::Ref(RefType::I31)) + fn visit_i16x8_shr_u(&mut 
self) -> Self::Output { + self.check_v128_shift_op() } - fn visit_ref_i31_shared(&mut self) -> Self::Output { - self.pop_operand(Some(ValType::I32))?; - self.push_operand(ValType::Ref( - RefType::I31.shared().expect("i31 is abstract"), - )) + fn visit_i32x4_shl(&mut self) -> Self::Output { + self.check_v128_shift_op() } - fn visit_i31_get_s(&mut self) -> Self::Output { - self.pop_maybe_shared_ref(AbstractHeapType::I31)?; - self.push_operand(ValType::I32) + fn visit_i32x4_shr_s(&mut self) -> Self::Output { + self.check_v128_shift_op() } - fn visit_i31_get_u(&mut self) -> Self::Output { - self.pop_maybe_shared_ref(AbstractHeapType::I31)?; - self.push_operand(ValType::I32) + fn visit_i32x4_shr_u(&mut self) -> Self::Output { + self.check_v128_shift_op() } - fn visit_try(&mut self, mut ty: BlockType) -> Self::Output { - self.check_block_type(&mut ty)?; - for ty in self.params(ty)?.rev() { - self.pop_operand(Some(ty))?; - } - self.push_ctrl(FrameKind::LegacyTry, ty)?; - Ok(()) + fn visit_i64x2_shl(&mut self) -> Self::Output { + self.check_v128_shift_op() } - fn visit_catch(&mut self, index: u32) -> Self::Output { - let frame = self.pop_ctrl()?; - if frame.kind != FrameKind::LegacyTry && frame.kind != FrameKind::LegacyCatch { - bail!(self.offset, "catch found outside of an `try` block"); - } - // Start a new frame and push `exnref` value. - let height = self.operands.len(); - let init_height = self.local_inits.push_ctrl(); - self.control.push(Frame { - kind: FrameKind::LegacyCatch, - block_type: frame.block_type, - height, - unreachable: false, - init_height, - }); - // Push exception argument types. - let ty = self.exception_tag_at(index)?; - for ty in ty.params() { - self.push_operand(*ty)?; - } - Ok(()) + fn visit_i64x2_shr_s(&mut self) -> Self::Output { + self.check_v128_shift_op() } - fn visit_rethrow(&mut self, relative_depth: u32) -> Self::Output { - // This is not a jump, but we need to check that the `rethrow` - // targets an actual `catch` to get the exception. 
- let (_, kind) = self.jump(relative_depth)?; - if kind != FrameKind::LegacyCatch && kind != FrameKind::LegacyCatchAll { - bail!( - self.offset, - "invalid rethrow label: target was not a `catch` block" - ); - } - self.unreachable()?; + fn visit_i64x2_shr_u(&mut self) -> Self::Output { + self.check_v128_shift_op() + } + fn visit_i8x16_swizzle(&mut self) -> Self::Output { + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::V128)?; Ok(()) } - fn visit_delegate(&mut self, relative_depth: u32) -> Self::Output { - let frame = self.pop_ctrl()?; - if frame.kind != FrameKind::LegacyTry { - bail!(self.offset, "delegate found outside of an `try` block"); - } - // This operation is not a jump, but we need to check the - // depth for validity - let _ = self.jump(relative_depth)?; - for ty in self.results(frame.block_type)? { - self.push_operand(ty)?; + fn visit_i8x16_shuffle(&mut self, lanes: [u8; 16]) -> Self::Output { + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(ValType::V128))?; + for i in lanes { + self.check_simd_lane_index(i, 32)?; } + self.push_operand(ValType::V128)?; Ok(()) } - fn visit_catch_all(&mut self) -> Self::Output { - let frame = self.pop_ctrl()?; - if frame.kind == FrameKind::LegacyCatchAll { - bail!(self.offset, "only one catch_all allowed per `try` block"); - } else if frame.kind != FrameKind::LegacyTry && frame.kind != FrameKind::LegacyCatch { - bail!(self.offset, "catch_all found outside of a `try` block"); - } - let height = self.operands.len(); - let init_height = self.local_inits.push_ctrl(); - self.control.push(Frame { - kind: FrameKind::LegacyCatchAll, - block_type: frame.block_type, - height, - unreachable: false, - init_height, - }); + fn visit_v128_load8_splat(&mut self, memarg: MemArg) -> Self::Output { + let ty = self.check_memarg(memarg)?; + self.pop_operand(Some(ty))?; + self.push_operand(ValType::V128)?; Ok(()) } - fn visit_cont_new(&mut self, type_index: u32) 
-> Self::Output { - let cont_ty = self.cont_type_at(type_index)?; - let rt = RefType::concrete(true, cont_ty.0); - self.pop_ref(Some(rt))?; - self.push_concrete_ref(false, type_index)?; + fn visit_v128_load16_splat(&mut self, memarg: MemArg) -> Self::Output { + let ty = self.check_memarg(memarg)?; + self.pop_operand(Some(ty))?; + self.push_operand(ValType::V128)?; Ok(()) } - fn visit_cont_bind(&mut self, argument_index: u32, result_index: u32) -> Self::Output { - // [ts1 ts1'] -> [ts2] - let arg_cont = self.cont_type_at(argument_index)?; - let arg_func = self.func_type_of_cont_type(arg_cont); - // [ts1''] -> [ts2'] - let res_cont = self.cont_type_at(result_index)?; - let res_func = self.func_type_of_cont_type(res_cont); - - // Verify that the argument's domain is at least as large as the - // result's domain. - if arg_func.params().len() < res_func.params().len() { - bail!(self.offset, "type mismatch in continuation arguments"); - } - - let argcnt = arg_func.params().len() - res_func.params().len(); - - // Check that [ts1'] -> [ts2] <: [ts1''] -> [ts2'] - if !self.is_subtype_many(res_func.params(), &arg_func.params()[argcnt..]) - || arg_func.results().len() != res_func.results().len() - || !self.is_subtype_many(arg_func.results(), res_func.results()) - { - bail!(self.offset, "type mismatch in continuation types"); - } - - // Check that the continuation is available on the stack. - self.pop_concrete_ref(true, argument_index)?; - - // Check that the argument prefix is available on the stack. - for &ty in arg_func.params().iter().take(argcnt).rev() { - self.pop_operand(Some(ty))?; - } - - // Construct the result type. 
- self.push_concrete_ref(false, result_index)?; - + fn visit_v128_load32_splat(&mut self, memarg: MemArg) -> Self::Output { + let ty = self.check_memarg(memarg)?; + self.pop_operand(Some(ty))?; + self.push_operand(ValType::V128)?; Ok(()) } - fn visit_suspend(&mut self, tag_index: u32) -> Self::Output { - let ft = &self.tag_at(tag_index)?; - for &ty in ft.params().iter().rev() { - self.pop_operand(Some(ty))?; - } - for &ty in ft.results() { - self.push_operand(ty)?; - } + fn visit_v128_load32_zero(&mut self, memarg: MemArg) -> Self::Output { + self.visit_v128_load32_splat(memarg) + } + fn visit_v128_load64_splat(&mut self, memarg: MemArg) -> Self::Output { + self.check_v128_load_op(memarg) + } + fn visit_v128_load64_zero(&mut self, memarg: MemArg) -> Self::Output { + self.check_v128_load_op(memarg) + } + fn visit_v128_load8x8_s(&mut self, memarg: MemArg) -> Self::Output { + self.check_v128_load_op(memarg) + } + fn visit_v128_load8x8_u(&mut self, memarg: MemArg) -> Self::Output { + self.check_v128_load_op(memarg) + } + fn visit_v128_load16x4_s(&mut self, memarg: MemArg) -> Self::Output { + self.check_v128_load_op(memarg) + } + fn visit_v128_load16x4_u(&mut self, memarg: MemArg) -> Self::Output { + self.check_v128_load_op(memarg) + } + fn visit_v128_load32x2_s(&mut self, memarg: MemArg) -> Self::Output { + self.check_v128_load_op(memarg) + } + fn visit_v128_load32x2_u(&mut self, memarg: MemArg) -> Self::Output { + self.check_v128_load_op(memarg) + } + fn visit_v128_load8_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { + let idx = self.check_memarg(memarg)?; + self.check_simd_lane_index(lane, 16)?; + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(idx))?; + self.push_operand(ValType::V128)?; Ok(()) } - fn visit_resume(&mut self, type_index: u32, table: ResumeTable) -> Self::Output { - // [ts1] -> [ts2] - let ft = self.check_resume_table(table, type_index)?; - self.pop_concrete_ref(true, type_index)?; - // Check that ts1 are available on the 
stack. - for &ty in ft.params().iter().rev() { - self.pop_operand(Some(ty))?; - } - - // Make ts2 available on the stack. - for &ty in ft.results() { - self.push_operand(ty)?; - } + fn visit_v128_load16_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { + let idx = self.check_memarg(memarg)?; + self.check_simd_lane_index(lane, 8)?; + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(idx))?; + self.push_operand(ValType::V128)?; Ok(()) } - fn visit_resume_throw( - &mut self, - type_index: u32, - tag_index: u32, - table: ResumeTable, - ) -> Self::Output { - // [ts1] -> [ts2] - let ft = self.check_resume_table(table, type_index)?; - // [ts1'] -> [] - let tag_ty = self.exception_tag_at(tag_index)?; - if tag_ty.results().len() != 0 { - bail!(self.offset, "type mismatch: non-empty tag result type") - } - self.pop_concrete_ref(true, type_index)?; - // Check that ts1' are available on the stack. - for &ty in tag_ty.params().iter().rev() { - self.pop_operand(Some(ty))?; - } - - // Make ts2 available on the stack. 
- for &ty in ft.results() { - self.push_operand(ty)?; - } + fn visit_v128_load32_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { + let idx = self.check_memarg(memarg)?; + self.check_simd_lane_index(lane, 4)?; + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(idx))?; + self.push_operand(ValType::V128)?; Ok(()) } - fn visit_switch(&mut self, type_index: u32, tag_index: u32) -> Self::Output { - // [t1* (ref null $ct2)] -> [te1*] - let cont_ty = self.cont_type_at(type_index)?; - let func_ty = self.func_type_of_cont_type(cont_ty); - // [] -> [t*] - let tag_ty = self.tag_at(tag_index)?; - if tag_ty.params().len() != 0 { - bail!(self.offset, "type mismatch: non-empty tag parameter type") - } - // Extract the other continuation reference - match func_ty.params().last() { - Some(ValType::Ref(rt)) if rt.is_concrete_type_ref() => { - let other_cont_id = rt - .type_index() - .unwrap() - .unpack() - .as_core_type_id() - .expect("expected canonicalized index"); - let sub_ty = self.resources.sub_type_at_id(other_cont_id); - let other_cont_ty = - if let CompositeInnerType::Cont(cont) = &sub_ty.composite_type.inner { - cont - } else { - bail!(self.offset, "non-continuation type"); - }; - let other_func_ty = self.func_type_of_cont_type(&other_cont_ty); - if func_ty.results().len() != tag_ty.results().len() - || !self.is_subtype_many(func_ty.results(), tag_ty.results()) - || other_func_ty.results().len() != tag_ty.results().len() - || !self.is_subtype_many(tag_ty.results(), other_func_ty.results()) - { - bail!(self.offset, "type mismatch in continuation types") - } - - // Pop the continuation reference. - self.pop_concrete_ref(true, type_index)?; - - // Check that the arguments t1* are available on the - // stack. - for &ty in func_ty.params().iter().rev().skip(1) { - self.pop_operand(Some(ty))?; - } - - // Make the results t2* available on the stack. 
- for &ty in other_func_ty.params() { - self.push_operand(ty)?; - } - } - Some(ty) => bail!( - self.offset, - "type mismatch: expected a continuation reference, found {}", - ty_to_str(*ty) - ), - None => bail!( - self.offset, - "type mismatch: instruction requires a continuation reference" - ), - } + fn visit_v128_load64_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { + let idx = self.check_memarg(memarg)?; + self.check_simd_lane_index(lane, 2)?; + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(idx))?; + self.push_operand(ValType::V128)?; Ok(()) } - fn visit_i64_add128(&mut self) -> Result<()> { - self.check_binop128() + fn visit_v128_store8_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { + let idx = self.check_memarg(memarg)?; + self.check_simd_lane_index(lane, 16)?; + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(idx))?; + Ok(()) } - fn visit_i64_sub128(&mut self) -> Result<()> { - self.check_binop128() + fn visit_v128_store16_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { + let idx = self.check_memarg(memarg)?; + self.check_simd_lane_index(lane, 8)?; + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(idx))?; + Ok(()) } - fn visit_i64_mul_wide_s(&mut self) -> Result<()> { - self.check_i64_mul_wide() + fn visit_v128_store32_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { + let idx = self.check_memarg(memarg)?; + self.check_simd_lane_index(lane, 4)?; + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(idx))?; + Ok(()) } - fn visit_i64_mul_wide_u(&mut self) -> Result<()> { - self.check_i64_mul_wide() + fn visit_v128_store64_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { + let idx = self.check_memarg(memarg)?; + self.check_simd_lane_index(lane, 2)?; + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(idx))?; + Ok(()) } } From 26cc42de684c2ed5f5f20925dcb74e1c382b3e82 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 12 Nov 
2024 13:14:03 +0100 Subject: [PATCH 13/83] add Operator::Simd variant --- crates/wasmparser/src/arity.rs | 1 + crates/wasmparser/src/binary_reader.rs | 12 +++++++++++- crates/wasmparser/src/readers/core/operators.rs | 7 +++++++ 3 files changed, 19 insertions(+), 1 deletion(-) diff --git a/crates/wasmparser/src/arity.rs b/crates/wasmparser/src/arity.rs index 53bd5262d6..d8876c3eae 100644 --- a/crates/wasmparser/src/arity.rs +++ b/crates/wasmparser/src/arity.rs @@ -249,6 +249,7 @@ impl Operator<'_> { operator_arity!(arity module $({ $($arg: $argty),* })? $($ann)*) } )* + Self::Simd(operator) => operator.operator_arity(), } ); } diff --git a/crates/wasmparser/src/binary_reader.rs b/crates/wasmparser/src/binary_reader.rs index b7d8c0d9a5..0dba2ac02a 100644 --- a/crates/wasmparser/src/binary_reader.rs +++ b/crates/wasmparser/src/binary_reader.rs @@ -2113,8 +2113,18 @@ impl<'a> VisitOperator<'a> for OperatorFactory<'a> { for_each_operator!(define_visit_operator); } +macro_rules! define_visit_simd_operator { + ($(@$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { + $( + fn $visit(&mut self $($(,$arg: $argty)*)?) -> Operator<'a> { + Operator::Simd(SimdOperator::$op $({ $($arg),* })?) + } + )* + } +} + impl<'a> VisitSimdOperator<'a> for OperatorFactory<'a> { - for_each_simd_operator!(define_visit_operator); + for_each_simd_operator!(define_visit_simd_operator); } /// Iterator returned from [`BinaryReader::read_iter`]. diff --git a/crates/wasmparser/src/readers/core/operators.rs b/crates/wasmparser/src/readers/core/operators.rs index 5c794d8482..5d519d02d9 100644 --- a/crates/wasmparser/src/readers/core/operators.rs +++ b/crates/wasmparser/src/readers/core/operators.rs @@ -224,6 +224,7 @@ macro_rules! define_operator { $( $op $({ $($payload)* })?, )* + Simd(SimdOperator), } } } @@ -439,6 +440,12 @@ pub trait VisitOperator<'a> { $( Operator::$op $({ $($arg),* })? 
=> self.$visit($($($arg.clone()),*)?), )* + Operator::Simd(op) => { + let Some(visitor) = self.simd_visitor() else { + panic!("missing SIMD visitor for: {op:?}") + }; + visitor.visit_simd_operator(op) + } } } } From ac2d6a85af34fbf9beaf137fd7ab4c482143bcf9 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 12 Nov 2024 13:14:19 +0100 Subject: [PATCH 14/83] remove simd operators from for_each_operator macro --- crates/wasmparser/src/lib.rs | 264 ----------------------------------- 1 file changed, 264 deletions(-) diff --git a/crates/wasmparser/src/lib.rs b/crates/wasmparser/src/lib.rs index 6cd1140df9..7a538f9b11 100644 --- a/crates/wasmparser/src/lib.rs +++ b/crates/wasmparser/src/lib.rs @@ -598,270 +598,6 @@ macro_rules! for_each_operator { @shared_everything_threads ArrayAtomicRmwCmpxchg { ordering: $crate::Ordering, array_type_index: u32 } => visit_array_atomic_rmw_cmpxchg (arity 4 -> 1) @shared_everything_threads RefI31Shared => visit_ref_i31_shared (arity 1 -> 1) - // 0xFD operators - // 128-bit SIMD - // - https://github.com/webassembly/simd - // - https://webassembly.github.io/simd/core/binary/instructions.html - @simd V128Load { memarg: $crate::MemArg } => visit_v128_load (load v128) - @simd V128Load8x8S { memarg: $crate::MemArg } => visit_v128_load8x8_s (load v128) - @simd V128Load8x8U { memarg: $crate::MemArg } => visit_v128_load8x8_u (load v128) - @simd V128Load16x4S { memarg: $crate::MemArg } => visit_v128_load16x4_s (load v128) - @simd V128Load16x4U { memarg: $crate::MemArg } => visit_v128_load16x4_u (load v128) - @simd V128Load32x2S { memarg: $crate::MemArg } => visit_v128_load32x2_s (load v128) - @simd V128Load32x2U { memarg: $crate::MemArg } => visit_v128_load32x2_u (load v128) - @simd V128Load8Splat { memarg: $crate::MemArg } => visit_v128_load8_splat (load v128) - @simd V128Load16Splat { memarg: $crate::MemArg } => visit_v128_load16_splat (load v128) - @simd V128Load32Splat { memarg: $crate::MemArg } => visit_v128_load32_splat (load v128) - 
@simd V128Load64Splat { memarg: $crate::MemArg } => visit_v128_load64_splat (load v128) - @simd V128Load32Zero { memarg: $crate::MemArg } => visit_v128_load32_zero (load v128) - @simd V128Load64Zero { memarg: $crate::MemArg } => visit_v128_load64_zero (load v128) - @simd V128Store { memarg: $crate::MemArg } => visit_v128_store (store v128) - @simd V128Load8Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load8_lane (load lane 16) - @simd V128Load16Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load16_lane (load lane 8) - @simd V128Load32Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load32_lane (load lane 4) - @simd V128Load64Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load64_lane (load lane 2) - @simd V128Store8Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_store8_lane (store lane 16) - @simd V128Store16Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_store16_lane (store lane 8) - @simd V128Store32Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_store32_lane (store lane 4) - @simd V128Store64Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_store64_lane (store lane 2) - @simd V128Const { value: $crate::V128 } => visit_v128_const (push v128) - @simd I8x16Shuffle { lanes: [u8; 16] } => visit_i8x16_shuffle (arity 2 -> 1) - @simd I8x16ExtractLaneS { lane: u8 } => visit_i8x16_extract_lane_s (extract i32 16) - @simd I8x16ExtractLaneU { lane: u8 } => visit_i8x16_extract_lane_u (extract i32 16) - @simd I8x16ReplaceLane { lane: u8 } => visit_i8x16_replace_lane (replace i32 16) - @simd I16x8ExtractLaneS { lane: u8 } => visit_i16x8_extract_lane_s (extract i32 8) - @simd I16x8ExtractLaneU { lane: u8 } => visit_i16x8_extract_lane_u (extract i32 8) - @simd I16x8ReplaceLane { lane: u8 } => visit_i16x8_replace_lane (replace i32 8) - @simd I32x4ExtractLane { lane: u8 } => visit_i32x4_extract_lane (extract i32 4) - @simd I32x4ReplaceLane { lane: u8 } => visit_i32x4_replace_lane (replace i32 4) - @simd 
I64x2ExtractLane { lane: u8 } => visit_i64x2_extract_lane (extract i64 2) - @simd I64x2ReplaceLane { lane: u8 } => visit_i64x2_replace_lane (replace i64 2) - @simd F32x4ExtractLane { lane: u8 } => visit_f32x4_extract_lane (extract f32 4) - @simd F32x4ReplaceLane { lane: u8 } => visit_f32x4_replace_lane (replace f32 4) - @simd F64x2ExtractLane { lane: u8 } => visit_f64x2_extract_lane (extract f64 2) - @simd F64x2ReplaceLane { lane: u8 } => visit_f64x2_replace_lane (replace f64 2) - @simd I8x16Swizzle => visit_i8x16_swizzle (binary v128) - @simd I8x16Splat => visit_i8x16_splat (splat i32) - @simd I16x8Splat => visit_i16x8_splat (splat i32) - @simd I32x4Splat => visit_i32x4_splat (splat i32) - @simd I64x2Splat => visit_i64x2_splat (splat i64) - @simd F32x4Splat => visit_f32x4_splat (splat f32) - @simd F64x2Splat => visit_f64x2_splat (splat f64) - @simd I8x16Eq => visit_i8x16_eq (binary v128) - @simd I8x16Ne => visit_i8x16_ne (binary v128) - @simd I8x16LtS => visit_i8x16_lt_s (binary v128) - @simd I8x16LtU => visit_i8x16_lt_u (binary v128) - @simd I8x16GtS => visit_i8x16_gt_s (binary v128) - @simd I8x16GtU => visit_i8x16_gt_u (binary v128) - @simd I8x16LeS => visit_i8x16_le_s (binary v128) - @simd I8x16LeU => visit_i8x16_le_u (binary v128) - @simd I8x16GeS => visit_i8x16_ge_s (binary v128) - @simd I8x16GeU => visit_i8x16_ge_u (binary v128) - @simd I16x8Eq => visit_i16x8_eq (binary v128) - @simd I16x8Ne => visit_i16x8_ne (binary v128) - @simd I16x8LtS => visit_i16x8_lt_s (binary v128) - @simd I16x8LtU => visit_i16x8_lt_u (binary v128) - @simd I16x8GtS => visit_i16x8_gt_s (binary v128) - @simd I16x8GtU => visit_i16x8_gt_u (binary v128) - @simd I16x8LeS => visit_i16x8_le_s (binary v128) - @simd I16x8LeU => visit_i16x8_le_u (binary v128) - @simd I16x8GeS => visit_i16x8_ge_s (binary v128) - @simd I16x8GeU => visit_i16x8_ge_u (binary v128) - @simd I32x4Eq => visit_i32x4_eq (binary v128) - @simd I32x4Ne => visit_i32x4_ne (binary v128) - @simd I32x4LtS => visit_i32x4_lt_s 
(binary v128) - @simd I32x4LtU => visit_i32x4_lt_u (binary v128) - @simd I32x4GtS => visit_i32x4_gt_s (binary v128) - @simd I32x4GtU => visit_i32x4_gt_u (binary v128) - @simd I32x4LeS => visit_i32x4_le_s (binary v128) - @simd I32x4LeU => visit_i32x4_le_u (binary v128) - @simd I32x4GeS => visit_i32x4_ge_s (binary v128) - @simd I32x4GeU => visit_i32x4_ge_u (binary v128) - @simd I64x2Eq => visit_i64x2_eq (binary v128) - @simd I64x2Ne => visit_i64x2_ne (binary v128) - @simd I64x2LtS => visit_i64x2_lt_s (binary v128) - @simd I64x2GtS => visit_i64x2_gt_s (binary v128) - @simd I64x2LeS => visit_i64x2_le_s (binary v128) - @simd I64x2GeS => visit_i64x2_ge_s (binary v128) - @simd F32x4Eq => visit_f32x4_eq (binary v128f) - @simd F32x4Ne => visit_f32x4_ne (binary v128f) - @simd F32x4Lt => visit_f32x4_lt (binary v128f) - @simd F32x4Gt => visit_f32x4_gt (binary v128f) - @simd F32x4Le => visit_f32x4_le (binary v128f) - @simd F32x4Ge => visit_f32x4_ge (binary v128f) - @simd F64x2Eq => visit_f64x2_eq (binary v128f) - @simd F64x2Ne => visit_f64x2_ne (binary v128f) - @simd F64x2Lt => visit_f64x2_lt (binary v128f) - @simd F64x2Gt => visit_f64x2_gt (binary v128f) - @simd F64x2Le => visit_f64x2_le (binary v128f) - @simd F64x2Ge => visit_f64x2_ge (binary v128f) - @simd V128Not => visit_v128_not (unary v128) - @simd V128And => visit_v128_and (binary v128) - @simd V128AndNot => visit_v128_andnot (binary v128) - @simd V128Or => visit_v128_or (binary v128) - @simd V128Xor => visit_v128_xor (binary v128) - @simd V128Bitselect => visit_v128_bitselect (ternary v128) - @simd V128AnyTrue => visit_v128_any_true (test v128) - @simd I8x16Abs => visit_i8x16_abs (unary v128) - @simd I8x16Neg => visit_i8x16_neg (unary v128) - @simd I8x16Popcnt => visit_i8x16_popcnt (unary v128) - @simd I8x16AllTrue => visit_i8x16_all_true (test v128) - @simd I8x16Bitmask => visit_i8x16_bitmask (test v128) - @simd I8x16NarrowI16x8S => visit_i8x16_narrow_i16x8_s (binary v128) - @simd I8x16NarrowI16x8U => 
visit_i8x16_narrow_i16x8_u (binary v128) - @simd I8x16Shl => visit_i8x16_shl (shift v128) - @simd I8x16ShrS => visit_i8x16_shr_s (shift v128) - @simd I8x16ShrU => visit_i8x16_shr_u (shift v128) - @simd I8x16Add => visit_i8x16_add (binary v128) - @simd I8x16AddSatS => visit_i8x16_add_sat_s (binary v128) - @simd I8x16AddSatU => visit_i8x16_add_sat_u (binary v128) - @simd I8x16Sub => visit_i8x16_sub (binary v128) - @simd I8x16SubSatS => visit_i8x16_sub_sat_s (binary v128) - @simd I8x16SubSatU => visit_i8x16_sub_sat_u (binary v128) - @simd I8x16MinS => visit_i8x16_min_s (binary v128) - @simd I8x16MinU => visit_i8x16_min_u (binary v128) - @simd I8x16MaxS => visit_i8x16_max_s (binary v128) - @simd I8x16MaxU => visit_i8x16_max_u (binary v128) - @simd I8x16AvgrU => visit_i8x16_avgr_u (binary v128) - @simd I16x8ExtAddPairwiseI8x16S => visit_i16x8_extadd_pairwise_i8x16_s (unary v128) - @simd I16x8ExtAddPairwiseI8x16U => visit_i16x8_extadd_pairwise_i8x16_u (unary v128) - @simd I16x8Abs => visit_i16x8_abs (unary v128) - @simd I16x8Neg => visit_i16x8_neg (unary v128) - @simd I16x8Q15MulrSatS => visit_i16x8_q15mulr_sat_s (binary v128) - @simd I16x8AllTrue => visit_i16x8_all_true (test v128) - @simd I16x8Bitmask => visit_i16x8_bitmask (test v128) - @simd I16x8NarrowI32x4S => visit_i16x8_narrow_i32x4_s (binary v128) - @simd I16x8NarrowI32x4U => visit_i16x8_narrow_i32x4_u (binary v128) - @simd I16x8ExtendLowI8x16S => visit_i16x8_extend_low_i8x16_s (unary v128) - @simd I16x8ExtendHighI8x16S => visit_i16x8_extend_high_i8x16_s (unary v128) - @simd I16x8ExtendLowI8x16U => visit_i16x8_extend_low_i8x16_u (unary v128) - @simd I16x8ExtendHighI8x16U => visit_i16x8_extend_high_i8x16_u (unary v128) - @simd I16x8Shl => visit_i16x8_shl (shift v128) - @simd I16x8ShrS => visit_i16x8_shr_s (shift v128) - @simd I16x8ShrU => visit_i16x8_shr_u (shift v128) - @simd I16x8Add => visit_i16x8_add (binary v128) - @simd I16x8AddSatS => visit_i16x8_add_sat_s (binary v128) - @simd I16x8AddSatU => 
visit_i16x8_add_sat_u (binary v128) - @simd I16x8Sub => visit_i16x8_sub (binary v128) - @simd I16x8SubSatS => visit_i16x8_sub_sat_s (binary v128) - @simd I16x8SubSatU => visit_i16x8_sub_sat_u (binary v128) - @simd I16x8Mul => visit_i16x8_mul (binary v128) - @simd I16x8MinS => visit_i16x8_min_s (binary v128) - @simd I16x8MinU => visit_i16x8_min_u (binary v128) - @simd I16x8MaxS => visit_i16x8_max_s (binary v128) - @simd I16x8MaxU => visit_i16x8_max_u (binary v128) - @simd I16x8AvgrU => visit_i16x8_avgr_u (binary v128) - @simd I16x8ExtMulLowI8x16S => visit_i16x8_extmul_low_i8x16_s (binary v128) - @simd I16x8ExtMulHighI8x16S => visit_i16x8_extmul_high_i8x16_s (binary v128) - @simd I16x8ExtMulLowI8x16U => visit_i16x8_extmul_low_i8x16_u (binary v128) - @simd I16x8ExtMulHighI8x16U => visit_i16x8_extmul_high_i8x16_u (binary v128) - @simd I32x4ExtAddPairwiseI16x8S => visit_i32x4_extadd_pairwise_i16x8_s (unary v128) - @simd I32x4ExtAddPairwiseI16x8U => visit_i32x4_extadd_pairwise_i16x8_u (unary v128) - @simd I32x4Abs => visit_i32x4_abs (unary v128) - @simd I32x4Neg => visit_i32x4_neg (unary v128) - @simd I32x4AllTrue => visit_i32x4_all_true (test v128) - @simd I32x4Bitmask => visit_i32x4_bitmask (test v128) - @simd I32x4ExtendLowI16x8S => visit_i32x4_extend_low_i16x8_s (unary v128) - @simd I32x4ExtendHighI16x8S => visit_i32x4_extend_high_i16x8_s (unary v128) - @simd I32x4ExtendLowI16x8U => visit_i32x4_extend_low_i16x8_u (unary v128) - @simd I32x4ExtendHighI16x8U => visit_i32x4_extend_high_i16x8_u (unary v128) - @simd I32x4Shl => visit_i32x4_shl (shift v128) - @simd I32x4ShrS => visit_i32x4_shr_s (shift v128) - @simd I32x4ShrU => visit_i32x4_shr_u (shift v128) - @simd I32x4Add => visit_i32x4_add (binary v128) - @simd I32x4Sub => visit_i32x4_sub (binary v128) - @simd I32x4Mul => visit_i32x4_mul (binary v128) - @simd I32x4MinS => visit_i32x4_min_s (binary v128) - @simd I32x4MinU => visit_i32x4_min_u (binary v128) - @simd I32x4MaxS => visit_i32x4_max_s (binary v128) - @simd 
I32x4MaxU => visit_i32x4_max_u (binary v128) - @simd I32x4DotI16x8S => visit_i32x4_dot_i16x8_s (binary v128) - @simd I32x4ExtMulLowI16x8S => visit_i32x4_extmul_low_i16x8_s (binary v128) - @simd I32x4ExtMulHighI16x8S => visit_i32x4_extmul_high_i16x8_s (binary v128) - @simd I32x4ExtMulLowI16x8U => visit_i32x4_extmul_low_i16x8_u (binary v128) - @simd I32x4ExtMulHighI16x8U => visit_i32x4_extmul_high_i16x8_u (binary v128) - @simd I64x2Abs => visit_i64x2_abs (unary v128) - @simd I64x2Neg => visit_i64x2_neg (unary v128) - @simd I64x2AllTrue => visit_i64x2_all_true (test v128) - @simd I64x2Bitmask => visit_i64x2_bitmask (test v128) - @simd I64x2ExtendLowI32x4S => visit_i64x2_extend_low_i32x4_s (unary v128) - @simd I64x2ExtendHighI32x4S => visit_i64x2_extend_high_i32x4_s (unary v128) - @simd I64x2ExtendLowI32x4U => visit_i64x2_extend_low_i32x4_u (unary v128) - @simd I64x2ExtendHighI32x4U => visit_i64x2_extend_high_i32x4_u (unary v128) - @simd I64x2Shl => visit_i64x2_shl (shift v128) - @simd I64x2ShrS => visit_i64x2_shr_s (shift v128) - @simd I64x2ShrU => visit_i64x2_shr_u (shift v128) - @simd I64x2Add => visit_i64x2_add (binary v128) - @simd I64x2Sub => visit_i64x2_sub (binary v128) - @simd I64x2Mul => visit_i64x2_mul (binary v128) - @simd I64x2ExtMulLowI32x4S => visit_i64x2_extmul_low_i32x4_s (binary v128) - @simd I64x2ExtMulHighI32x4S => visit_i64x2_extmul_high_i32x4_s (binary v128) - @simd I64x2ExtMulLowI32x4U => visit_i64x2_extmul_low_i32x4_u (binary v128) - @simd I64x2ExtMulHighI32x4U => visit_i64x2_extmul_high_i32x4_u (binary v128) - @simd F32x4Ceil => visit_f32x4_ceil (unary v128f) - @simd F32x4Floor => visit_f32x4_floor (unary v128f) - @simd F32x4Trunc => visit_f32x4_trunc (unary v128f) - @simd F32x4Nearest => visit_f32x4_nearest (unary v128f) - @simd F32x4Abs => visit_f32x4_abs (unary v128f) - @simd F32x4Neg => visit_f32x4_neg (unary v128f) - @simd F32x4Sqrt => visit_f32x4_sqrt (unary v128f) - @simd F32x4Add => visit_f32x4_add (binary v128f) - @simd F32x4Sub => 
visit_f32x4_sub (binary v128f) - @simd F32x4Mul => visit_f32x4_mul (binary v128f) - @simd F32x4Div => visit_f32x4_div (binary v128f) - @simd F32x4Min => visit_f32x4_min (binary v128f) - @simd F32x4Max => visit_f32x4_max (binary v128f) - @simd F32x4PMin => visit_f32x4_pmin (binary v128f) - @simd F32x4PMax => visit_f32x4_pmax (binary v128f) - @simd F64x2Ceil => visit_f64x2_ceil (unary v128f) - @simd F64x2Floor => visit_f64x2_floor (unary v128f) - @simd F64x2Trunc => visit_f64x2_trunc (unary v128f) - @simd F64x2Nearest => visit_f64x2_nearest (unary v128f) - @simd F64x2Abs => visit_f64x2_abs (unary v128f) - @simd F64x2Neg => visit_f64x2_neg (unary v128f) - @simd F64x2Sqrt => visit_f64x2_sqrt (unary v128f) - @simd F64x2Add => visit_f64x2_add (binary v128f) - @simd F64x2Sub => visit_f64x2_sub (binary v128f) - @simd F64x2Mul => visit_f64x2_mul (binary v128f) - @simd F64x2Div => visit_f64x2_div (binary v128f) - @simd F64x2Min => visit_f64x2_min (binary v128f) - @simd F64x2Max => visit_f64x2_max (binary v128f) - @simd F64x2PMin => visit_f64x2_pmin (binary v128f) - @simd F64x2PMax => visit_f64x2_pmax (binary v128f) - @simd I32x4TruncSatF32x4S => visit_i32x4_trunc_sat_f32x4_s (unary v128f) - @simd I32x4TruncSatF32x4U => visit_i32x4_trunc_sat_f32x4_u (unary v128f) - @simd F32x4ConvertI32x4S => visit_f32x4_convert_i32x4_s (unary v128f) - @simd F32x4ConvertI32x4U => visit_f32x4_convert_i32x4_u (unary v128f) - @simd I32x4TruncSatF64x2SZero => visit_i32x4_trunc_sat_f64x2_s_zero (unary v128f) - @simd I32x4TruncSatF64x2UZero => visit_i32x4_trunc_sat_f64x2_u_zero (unary v128f) - @simd F64x2ConvertLowI32x4S => visit_f64x2_convert_low_i32x4_s (unary v128f) - @simd F64x2ConvertLowI32x4U => visit_f64x2_convert_low_i32x4_u (unary v128f) - @simd F32x4DemoteF64x2Zero => visit_f32x4_demote_f64x2_zero (unary v128f) - @simd F64x2PromoteLowF32x4 => visit_f64x2_promote_low_f32x4 (unary v128f) - - // Relaxed SIMD operators - // https://github.com/WebAssembly/relaxed-simd - @relaxed_simd 
I8x16RelaxedSwizzle => visit_i8x16_relaxed_swizzle (binary v128) - @relaxed_simd I32x4RelaxedTruncF32x4S => visit_i32x4_relaxed_trunc_f32x4_s (unary v128) - @relaxed_simd I32x4RelaxedTruncF32x4U => visit_i32x4_relaxed_trunc_f32x4_u (unary v128) - @relaxed_simd I32x4RelaxedTruncF64x2SZero => visit_i32x4_relaxed_trunc_f64x2_s_zero (unary v128) - @relaxed_simd I32x4RelaxedTruncF64x2UZero => visit_i32x4_relaxed_trunc_f64x2_u_zero (unary v128) - @relaxed_simd F32x4RelaxedMadd => visit_f32x4_relaxed_madd (ternary v128) - @relaxed_simd F32x4RelaxedNmadd => visit_f32x4_relaxed_nmadd (ternary v128) - @relaxed_simd F64x2RelaxedMadd => visit_f64x2_relaxed_madd (ternary v128) - @relaxed_simd F64x2RelaxedNmadd => visit_f64x2_relaxed_nmadd (ternary v128) - @relaxed_simd I8x16RelaxedLaneselect => visit_i8x16_relaxed_laneselect (ternary v128) - @relaxed_simd I16x8RelaxedLaneselect => visit_i16x8_relaxed_laneselect (ternary v128) - @relaxed_simd I32x4RelaxedLaneselect => visit_i32x4_relaxed_laneselect (ternary v128) - @relaxed_simd I64x2RelaxedLaneselect => visit_i64x2_relaxed_laneselect (ternary v128) - @relaxed_simd F32x4RelaxedMin => visit_f32x4_relaxed_min (binary v128) - @relaxed_simd F32x4RelaxedMax => visit_f32x4_relaxed_max (binary v128) - @relaxed_simd F64x2RelaxedMin => visit_f64x2_relaxed_min (binary v128) - @relaxed_simd F64x2RelaxedMax => visit_f64x2_relaxed_max (binary v128) - @relaxed_simd I16x8RelaxedQ15mulrS => visit_i16x8_relaxed_q15mulr_s (binary v128) - @relaxed_simd I16x8RelaxedDotI8x16I7x16S => visit_i16x8_relaxed_dot_i8x16_i7x16_s (binary v128) - @relaxed_simd I32x4RelaxedDotI8x16I7x16AddS => visit_i32x4_relaxed_dot_i8x16_i7x16_add_s (ternary v128) - // Typed Function references @function_references CallRef { type_index: u32 } => visit_call_ref (arity 1 type -> type) @function_references ReturnCallRef { type_index: u32 } => visit_return_call_ref (arity 1 type -> 0) From 531240ebd7ccdffd386457cd49e7878f58db5f41 Mon Sep 17 00:00:00 2001 From: Robin Freyler 
Date: Tue, 19 Nov 2024 11:41:47 +0100 Subject: [PATCH 15/83] adjust wasmprinter crate for simd crate feature --- crates/wasmprinter/Cargo.toml | 3 ++- crates/wasmprinter/src/operator.rs | 12 +++++++++++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/crates/wasmprinter/Cargo.toml b/crates/wasmprinter/Cargo.toml index d873494223..faf62bcc4d 100644 --- a/crates/wasmprinter/Cargo.toml +++ b/crates/wasmprinter/Cargo.toml @@ -25,5 +25,6 @@ termcolor = { workspace = true } wat = { path = "../wat" } [features] -default = ['component-model'] +default = ['component-model', 'simd'] component-model = ['wasmparser/component-model'] +simd = ['wasmparser/simd'] diff --git a/crates/wasmprinter/src/operator.rs b/crates/wasmprinter/src/operator.rs index 6456ecc3d9..63f099f231 100644 --- a/crates/wasmprinter/src/operator.rs +++ b/crates/wasmprinter/src/operator.rs @@ -4,7 +4,7 @@ use termcolor::{Ansi, NoColor}; use wasmparser::{ BinaryReader, BlockType, BrTable, Catch, CompositeInnerType, ContType, FrameKind, FuncType, Handle, MemArg, ModuleArity, Operator, Ordering, RefType, ResumeTable, SubType, TryTable, - VisitOperator, + VisitOperator, VisitSimdOperator, }; pub struct OperatorState { @@ -1389,9 +1389,19 @@ macro_rules! 
define_visit { impl<'a> VisitOperator<'a> for PrintOperator<'_, '_, '_, '_> { type Output = Result<()>; + #[cfg(feature = "simd")] + fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator<'a, Output = Self::Output>> { + Some(self) + } + wasmparser::for_each_operator!(define_visit); } +#[cfg(feature = "simd")] +impl<'a> VisitSimdOperator<'a> for PrintOperator<'_, '_, '_, '_> { + wasmparser::for_each_simd_operator!(define_visit); +} + pub trait OpPrinter { fn branch_hint(&mut self, offset: usize, taken: bool) -> Result<()>; fn set_offset(&mut self, offset: usize); From 5c94737bf7b8f3b2023480891099afaf36d0d63e Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 19 Nov 2024 11:43:47 +0100 Subject: [PATCH 16/83] enable wasmparser's simd feature by default --- crates/wasmparser/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/wasmparser/Cargo.toml b/crates/wasmparser/Cargo.toml index 2b628883e1..12266c9033 100644 --- a/crates/wasmparser/Cargo.toml +++ b/crates/wasmparser/Cargo.toml @@ -43,7 +43,7 @@ name = "benchmark" harness = false [features] -default = ['std', 'validate', 'serde', 'features', 'component-model', 'hash-collections'] +default = ['std', 'validate', 'serde', 'features', 'component-model', 'hash-collections', 'simd'] # A feature which enables implementations of `std::error::Error` as appropriate # along with other convenience APIs. 
This additionally uses the standard From bfb354a8362e8c17f0b82c3b7094067fb68ded3c Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 19 Nov 2024 11:43:55 +0100 Subject: [PATCH 17/83] fix trait impl signature --- crates/wasmparser/src/binary_reader.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/wasmparser/src/binary_reader.rs b/crates/wasmparser/src/binary_reader.rs index 0dba2ac02a..0593c1215e 100644 --- a/crates/wasmparser/src/binary_reader.rs +++ b/crates/wasmparser/src/binary_reader.rs @@ -2106,7 +2106,7 @@ impl<'a> VisitOperator<'a> for OperatorFactory<'a> { type Output = Operator<'a>; #[cfg(feature = "simd")] - fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator> { + fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator<'a, Output = Self::Output>> { Some(self) } From e257fcb617d5017292720f33aed1f3a25e06899a Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 19 Nov 2024 11:50:41 +0100 Subject: [PATCH 18/83] add docs to wasmparser's simd crate feature --- crates/wasmparser/Cargo.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/crates/wasmparser/Cargo.toml b/crates/wasmparser/Cargo.toml index 12266c9033..05974011ff 100644 --- a/crates/wasmparser/Cargo.toml +++ b/crates/wasmparser/Cargo.toml @@ -85,4 +85,7 @@ features = [] # interested in working with core modules then this feature can be disabled. component-model = [] +# A feature that enables parsing and validating the `simd` and `relaxed-simd` +# proposals for WebAssembly. This is enabled by default but if your use case is +# only interested in working on non-SIMD code then this feature can be disabled. 
simd = [] From eb3845df64fba1a3afe6af8c1c30d27ad90a7ca6 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 19 Nov 2024 12:07:35 +0100 Subject: [PATCH 19/83] feature gate simd related code in wasmparser --- crates/wasmparser/benches/benchmark.rs | 2 +- crates/wasmparser/src/arity.rs | 7 ++++++- crates/wasmparser/src/binary_reader.rs | 5 ++++- crates/wasmparser/src/lib.rs | 1 + crates/wasmparser/src/readers/core/operators.rs | 14 +++++++++++++- crates/wasmparser/src/validator/operators.rs | 5 ++++- 6 files changed, 29 insertions(+), 5 deletions(-) diff --git a/crates/wasmparser/benches/benchmark.rs b/crates/wasmparser/benches/benchmark.rs index c5eb773746..2b0a3b5ef9 100644 --- a/crates/wasmparser/benches/benchmark.rs +++ b/crates/wasmparser/benches/benchmark.rs @@ -1,10 +1,10 @@ use anyhow::Result; use criterion::{criterion_group, criterion_main, Criterion}; use once_cell::unsync::Lazy; -use wasmparser::VisitSimdOperator; use std::fs; use std::path::Path; use std::path::PathBuf; +use wasmparser::VisitSimdOperator; use wasmparser::{DataKind, ElementKind, Parser, Payload, Validator, VisitOperator, WasmFeatures}; /// A benchmark input. diff --git a/crates/wasmparser/src/arity.rs b/crates/wasmparser/src/arity.rs index d8876c3eae..ebd7da4512 100644 --- a/crates/wasmparser/src/arity.rs +++ b/crates/wasmparser/src/arity.rs @@ -13,8 +13,11 @@ * limitations under the License. */ +#[cfg(feature = "simd")] +use crate::SimdOperator; use crate::{ - BinaryReader, BinaryReaderError, BlockType, CompositeInnerType, ContType, FrameKind, FuncType, Operator, RefType, Result, SimdOperator, SubType + BinaryReader, BinaryReaderError, BlockType, CompositeInnerType, ContType, FrameKind, FuncType, + Operator, RefType, Result, SubType, }; /// To compute the arity (param and result counts) of "variable-arity" @@ -249,6 +252,7 @@ impl Operator<'_> { operator_arity!(arity module $({ $($arg: $argty),* })? 
$($ann)*) } )* + #[cfg(feature = "simd")] Self::Simd(operator) => operator.operator_arity(), } ); @@ -257,6 +261,7 @@ impl Operator<'_> { } } +#[cfg(feature = "simd")] impl SimdOperator { /// Compute the arity (param and result counts) of the operator, given /// an impl ModuleArity, which stores the necessary module state. diff --git a/crates/wasmparser/src/binary_reader.rs b/crates/wasmparser/src/binary_reader.rs index 0593c1215e..32f97bce08 100644 --- a/crates/wasmparser/src/binary_reader.rs +++ b/crates/wasmparser/src/binary_reader.rs @@ -1125,7 +1125,7 @@ impl<'a> BinaryReader<'a> { bail!(pos, "unexpected SIMD opcode: 0x{code:x}") }; self.visit_0xfd_operator(pos, &mut visitor)? - }, + } 0xfe => self.visit_0xfe_operator(pos, visitor)?, _ => bail!(pos, "illegal opcode: 0x{code:x}"), @@ -1371,6 +1371,7 @@ impl<'a> BinaryReader<'a> { }) } + #[cfg(feature = "simd")] fn visit_0xfd_operator( &mut self, pos: usize, @@ -2113,6 +2114,7 @@ impl<'a> VisitOperator<'a> for OperatorFactory<'a> { for_each_operator!(define_visit_operator); } +#[cfg(feature = "simd")] macro_rules! define_visit_simd_operator { ($(@$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { $( @@ -2123,6 +2125,7 @@ macro_rules! define_visit_simd_operator { } } +#[cfg(feature = "simd")] impl<'a> VisitSimdOperator<'a> for OperatorFactory<'a> { for_each_simd_operator!(define_visit_simd_operator); } diff --git a/crates/wasmparser/src/lib.rs b/crates/wasmparser/src/lib.rs index 7a538f9b11..cf1b5f80ad 100644 --- a/crates/wasmparser/src/lib.rs +++ b/crates/wasmparser/src/lib.rs @@ -623,6 +623,7 @@ macro_rules! for_each_operator { /// Docs: TODO #[macro_export] +#[cfg(feature = "simd")] macro_rules! for_each_simd_operator { ($mac:ident) => { $mac! 
{ diff --git a/crates/wasmparser/src/readers/core/operators.rs b/crates/wasmparser/src/readers/core/operators.rs index 5d519d02d9..96004167a3 100644 --- a/crates/wasmparser/src/readers/core/operators.rs +++ b/crates/wasmparser/src/readers/core/operators.rs @@ -220,16 +220,19 @@ macro_rules! define_operator { /// [here]: https://webassembly.github.io/spec/core/binary/instructions.html #[derive(Debug, Clone, Eq, PartialEq)] #[allow(missing_docs)] + #[non_exhaustive] pub enum Operator<'a> { $( $op $({ $($payload)* })?, )* + #[cfg(feature = "simd")] Simd(SimdOperator), } } } for_each_operator!(define_operator); +#[cfg(feature = "simd")] macro_rules! define_simd_operator { ($(@$proposal:ident $op:ident $({ $($payload:tt)* })? => $visit:ident ($($ann:tt)*))*) => { /// SIMD instructions as defined [here]. @@ -244,6 +247,7 @@ macro_rules! define_simd_operator { } } } +#[cfg(feature = "simd")] for_each_simd_operator!(define_simd_operator); /// A reader for a core WebAssembly function's operators. @@ -452,13 +456,17 @@ pub trait VisitOperator<'a> { for_each_operator!(visit_operator) } - fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator<'a, Output = Self::Output>> { None } + #[cfg(feature = "simd")] + fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator<'a, Output = Self::Output>> { + None + } for_each_operator!(define_visit_operator); } /// Trait implemented by types that can visit all [`Operator`] variants. #[allow(missing_docs)] +#[cfg(feature = "simd")] pub trait VisitSimdOperator<'a>: VisitOperator<'a> { /// Visits the SIMD [`Operator`] `op` using the given `offset`. 
/// @@ -499,12 +507,14 @@ impl<'a, 'b, V: VisitOperator<'a> + ?Sized> VisitOperator<'a> for &'b mut V { fn visit_operator(&mut self, op: &Operator<'a>) -> Self::Output { V::visit_operator(*self, op) } + #[cfg(feature = "simd")] fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator<'a, Output = V::Output>> { V::simd_visitor(*self) } for_each_operator!(define_visit_operator_delegate); } +#[cfg(feature = "simd")] impl<'a, 'b, V: VisitSimdOperator<'a> + ?Sized> VisitSimdOperator<'a> for &'b mut V { fn visit_simd_operator(&mut self, op: &SimdOperator) -> Self::Output { V::visit_simd_operator(*self, op) @@ -517,12 +527,14 @@ impl<'a, V: VisitOperator<'a> + ?Sized> VisitOperator<'a> for Box { fn visit_operator(&mut self, op: &Operator<'a>) -> Self::Output { V::visit_operator(&mut *self, op) } + #[cfg(feature = "simd")] fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator<'a, Output = V::Output>> { V::simd_visitor(&mut *self) } for_each_operator!(define_visit_operator_delegate); } +#[cfg(feature = "simd")] impl<'a, V: VisitSimdOperator<'a> + ?Sized> VisitSimdOperator<'a> for Box { fn visit_simd_operator(&mut self, op: &SimdOperator) -> Self::Output { V::visit_simd_operator(&mut *self, op) diff --git a/crates/wasmparser/src/validator/operators.rs b/crates/wasmparser/src/validator/operators.rs index 434377d156..a333182d50 100644 --- a/crates/wasmparser/src/validator/operators.rs +++ b/crates/wasmparser/src/validator/operators.rs @@ -22,13 +22,15 @@ // confusing it's recommended to read over that section to see how it maps to // the various methods here. 
+#[cfg(feature = "simd")] +use crate::VisitSimdOperator; use crate::{ limits::MAX_WASM_FUNCTION_LOCALS, AbstractHeapType, BinaryReaderError, BlockType, BrTable, Catch, ContType, FieldType, FrameKind, FuncType, GlobalType, Handle, HeapType, Ieee32, Ieee64, MemArg, ModuleArity, RefType, Result, ResumeTable, StorageType, StructType, SubType, TableType, TryTable, UnpackedIndex, ValType, VisitOperator, WasmFeatures, WasmModuleResources, V128, }; -use crate::{prelude::*, CompositeInnerType, Ordering, VisitSimdOperator}; +use crate::{prelude::*, CompositeInnerType, Ordering}; use core::ops::{Deref, DerefMut}; pub(crate) struct OperatorValidator { @@ -4274,6 +4276,7 @@ where } } +#[cfg(feature = "simd")] impl<'a, T> VisitSimdOperator<'a> for OperatorValidatorTemp<'_, '_, T> where T: WasmModuleResources, From 77a33def6cedf6e811638352bd0b7e4e41c33742 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 19 Nov 2024 12:09:22 +0100 Subject: [PATCH 20/83] update docs for SimdOperator --- crates/wasmparser/src/readers/core/operators.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/wasmparser/src/readers/core/operators.rs b/crates/wasmparser/src/readers/core/operators.rs index 96004167a3..cb666f1535 100644 --- a/crates/wasmparser/src/readers/core/operators.rs +++ b/crates/wasmparser/src/readers/core/operators.rs @@ -235,7 +235,7 @@ for_each_operator!(define_operator); #[cfg(feature = "simd")] macro_rules! define_simd_operator { ($(@$proposal:ident $op:ident $({ $($payload:tt)* })? => $visit:ident ($($ann:tt)*))*) => { - /// SIMD instructions as defined [here]. + /// The subset of Wasm SIMD instructions as defined [here]. 
/// /// [here]: https://webassembly.github.io/spec/core/binary/instructions.html #[derive(Debug, Clone, Eq, PartialEq)] From 7439a1fd1517507c826988af7215afbcb2f2d4d3 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 19 Nov 2024 13:00:57 +0100 Subject: [PATCH 21/83] add missing `simd` crate feature gates --- crates/wasmparser/src/binary_reader.rs | 11 ++++--- .../wasmparser/src/readers/core/operators.rs | 1 + crates/wasmparser/src/validator/operators.rs | 31 +++++++++++++------ 3 files changed, 29 insertions(+), 14 deletions(-) diff --git a/crates/wasmparser/src/binary_reader.rs b/crates/wasmparser/src/binary_reader.rs index 32f97bce08..2099c4aaaa 100644 --- a/crates/wasmparser/src/binary_reader.rs +++ b/crates/wasmparser/src/binary_reader.rs @@ -1121,10 +1121,11 @@ impl<'a> BinaryReader<'a> { 0xfb => self.visit_0xfb_operator(pos, visitor)?, 0xfc => self.visit_0xfc_operator(pos, visitor)?, 0xfd => { - let Some(mut visitor) = visitor.simd_visitor() else { - bail!(pos, "unexpected SIMD opcode: 0x{code:x}") - }; - self.visit_0xfd_operator(pos, &mut visitor)? 
+ #[cfg(feature = "simd")] + if let Some(mut visitor) = visitor.simd_visitor() { + return self.visit_0xfd_operator(pos, &mut visitor); + } + bail!(pos, "unexpected SIMD opcode: 0x{code:x}") } 0xfe => self.visit_0xfe_operator(pos, visitor)?, @@ -1893,6 +1894,7 @@ impl<'a> BinaryReader<'a> { self.remaining_buffer() == &[0x0b] } + #[cfg(feature = "simd")] fn read_lane_index(&mut self, max: u8) -> Result { let index = self.read_u8()?; if index >= max { @@ -1904,6 +1906,7 @@ impl<'a> BinaryReader<'a> { Ok(index) } + #[cfg(feature = "simd")] fn read_v128(&mut self) -> Result { let mut bytes = [0; 16]; bytes.clone_from_slice(self.read_bytes(16)?); diff --git a/crates/wasmparser/src/readers/core/operators.rs b/crates/wasmparser/src/readers/core/operators.rs index cb666f1535..f25f57bc58 100644 --- a/crates/wasmparser/src/readers/core/operators.rs +++ b/crates/wasmparser/src/readers/core/operators.rs @@ -444,6 +444,7 @@ pub trait VisitOperator<'a> { $( Operator::$op $({ $($arg),* })? => self.$visit($($($arg.clone()),*)?), )* + #[cfg(feature = "simd")] Operator::Simd(op) => { let Some(visitor) = self.simd_visitor() else { panic!("missing SIMD visitor for: {op:?}") diff --git a/crates/wasmparser/src/validator/operators.rs b/crates/wasmparser/src/validator/operators.rs index a333182d50..1f1d9bfbc8 100644 --- a/crates/wasmparser/src/validator/operators.rs +++ b/crates/wasmparser/src/validator/operators.rs @@ -22,15 +22,15 @@ // confusing it's recommended to read over that section to see how it maps to // the various methods here. 
-#[cfg(feature = "simd")] -use crate::VisitSimdOperator; use crate::{ limits::MAX_WASM_FUNCTION_LOCALS, AbstractHeapType, BinaryReaderError, BlockType, BrTable, Catch, ContType, FieldType, FrameKind, FuncType, GlobalType, Handle, HeapType, Ieee32, Ieee64, MemArg, ModuleArity, RefType, Result, ResumeTable, StorageType, StructType, SubType, TableType, - TryTable, UnpackedIndex, ValType, VisitOperator, WasmFeatures, WasmModuleResources, V128, + TryTable, UnpackedIndex, ValType, VisitOperator, WasmFeatures, WasmModuleResources, }; use crate::{prelude::*, CompositeInnerType, Ordering}; +#[cfg(feature = "simd")] +use crate::{VisitSimdOperator, V128}; use core::ops::{Deref, DerefMut}; pub(crate) struct OperatorValidator { @@ -1024,13 +1024,6 @@ where self.check_memory_index(memarg.memory) } - fn check_simd_lane_index(&self, index: u8, max: u8) -> Result<()> { - if index >= max { - bail!(self.offset, "SIMD index out of bounds"); - } - Ok(()) - } - /// Validates a block type, primarily with various in-flight proposals. fn check_block_type(&self, ty: &mut BlockType) -> Result<()> { match ty { @@ -1287,6 +1280,19 @@ where self.push_operand(op_ty)?; Ok(()) } +} + +#[cfg(feature = "simd")] +impl<'resources, R> OperatorValidatorTemp<'_, 'resources, R> +where + R: WasmModuleResources, +{ + fn check_simd_lane_index(&self, index: u8, max: u8) -> Result<()> { + if index >= max { + bail!(self.offset, "SIMD index out of bounds"); + } + Ok(()) + } /// Checks a [`V128`] splat operator. fn check_v128_splat(&mut self, src_ty: ValType) -> Result<()> { @@ -1353,7 +1359,12 @@ where self.push_operand(ValType::V128)?; Ok(()) } +} +impl<'resources, R> OperatorValidatorTemp<'_, 'resources, R> +where + R: WasmModuleResources, +{ /// Common helper for `ref.test` and `ref.cast` downcasting/checking /// instructions. Returns the given `heap_type` as a `ValType`. 
fn check_downcast(&mut self, nullable: bool, mut heap_type: HeapType) -> Result { From c373909d579725e6d3bf10570ed314bd07c22bf1 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 19 Nov 2024 16:27:17 +0100 Subject: [PATCH 22/83] move simd op validation down in file --- crates/wasmparser/src/validator/operators.rs | 163 +++++++++---------- 1 file changed, 79 insertions(+), 84 deletions(-) diff --git a/crates/wasmparser/src/validator/operators.rs b/crates/wasmparser/src/validator/operators.rs index 1f1d9bfbc8..85535a1061 100644 --- a/crates/wasmparser/src/validator/operators.rs +++ b/crates/wasmparser/src/validator/operators.rs @@ -1280,91 +1280,7 @@ where self.push_operand(op_ty)?; Ok(()) } -} - -#[cfg(feature = "simd")] -impl<'resources, R> OperatorValidatorTemp<'_, 'resources, R> -where - R: WasmModuleResources, -{ - fn check_simd_lane_index(&self, index: u8, max: u8) -> Result<()> { - if index >= max { - bail!(self.offset, "SIMD index out of bounds"); - } - Ok(()) - } - - /// Checks a [`V128`] splat operator. - fn check_v128_splat(&mut self, src_ty: ValType) -> Result<()> { - self.pop_operand(Some(src_ty))?; - self.push_operand(ValType::V128)?; - Ok(()) - } - - /// Checks a [`V128`] binary operator. - fn check_v128_binary_op(&mut self) -> Result<()> { - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::V128)?; - Ok(()) - } - - /// Checks a [`V128`] binary float operator. - fn check_v128_fbinary_op(&mut self) -> Result<()> { - self.check_floats_enabled()?; - self.check_v128_binary_op() - } - - /// Checks a [`V128`] unary operator. - fn check_v128_unary_op(&mut self) -> Result<()> { - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::V128)?; - Ok(()) - } - /// Checks a [`V128`] unary float operator. - fn check_v128_funary_op(&mut self) -> Result<()> { - self.check_floats_enabled()?; - self.check_v128_unary_op() - } - - /// Checks a [`V128`] relaxed ternary operator. 
- fn check_v128_ternary_op(&mut self) -> Result<()> { - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::V128)?; - Ok(()) - } - - /// Checks a [`V128`] test operator. - fn check_v128_bitmask_op(&mut self) -> Result<()> { - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::I32)?; - Ok(()) - } - - /// Checks a [`V128`] shift operator. - fn check_v128_shift_op(&mut self) -> Result<()> { - self.pop_operand(Some(ValType::I32))?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::V128)?; - Ok(()) - } - - /// Checks a [`V128`] common load operator. - fn check_v128_load_op(&mut self, memarg: MemArg) -> Result<()> { - let idx = self.check_memarg(memarg)?; - self.pop_operand(Some(idx))?; - self.push_operand(ValType::V128)?; - Ok(()) - } -} - -impl<'resources, R> OperatorValidatorTemp<'_, 'resources, R> -where - R: WasmModuleResources, -{ /// Common helper for `ref.test` and `ref.cast` downcasting/checking /// instructions. Returns the given `heap_type` as a `ValType`. fn check_downcast(&mut self, nullable: bool, mut heap_type: HeapType) -> Result { @@ -1765,6 +1681,85 @@ where } } +#[cfg(feature = "simd")] +impl<'resources, R> OperatorValidatorTemp<'_, 'resources, R> +where + R: WasmModuleResources, +{ + fn check_simd_lane_index(&self, index: u8, max: u8) -> Result<()> { + if index >= max { + bail!(self.offset, "SIMD index out of bounds"); + } + Ok(()) + } + + /// Checks a [`V128`] splat operator. + fn check_v128_splat(&mut self, src_ty: ValType) -> Result<()> { + self.pop_operand(Some(src_ty))?; + self.push_operand(ValType::V128)?; + Ok(()) + } + + /// Checks a [`V128`] binary operator. + fn check_v128_binary_op(&mut self) -> Result<()> { + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::V128)?; + Ok(()) + } + + /// Checks a [`V128`] binary float operator. 
+ fn check_v128_fbinary_op(&mut self) -> Result<()> { + self.check_floats_enabled()?; + self.check_v128_binary_op() + } + + /// Checks a [`V128`] unary operator. + fn check_v128_unary_op(&mut self) -> Result<()> { + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::V128)?; + Ok(()) + } + + /// Checks a [`V128`] unary float operator. + fn check_v128_funary_op(&mut self) -> Result<()> { + self.check_floats_enabled()?; + self.check_v128_unary_op() + } + + /// Checks a [`V128`] relaxed ternary operator. + fn check_v128_ternary_op(&mut self) -> Result<()> { + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::V128)?; + Ok(()) + } + + /// Checks a [`V128`] test operator. + fn check_v128_bitmask_op(&mut self) -> Result<()> { + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::I32)?; + Ok(()) + } + + /// Checks a [`V128`] shift operator. + fn check_v128_shift_op(&mut self) -> Result<()> { + self.pop_operand(Some(ValType::I32))?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::V128)?; + Ok(()) + } + + /// Checks a [`V128`] common load operator. 
+ fn check_v128_load_op(&mut self, memarg: MemArg) -> Result<()> { + let idx = self.check_memarg(memarg)?; + self.pop_operand(Some(idx))?; + self.push_operand(ValType::V128)?; + Ok(()) + } +} + pub fn ty_to_str(ty: ValType) -> &'static str { match ty { ValType::I32 => "i32", From 267808ca15e4409ab8d425a2bddfb4fc9d4d6b04 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 19 Nov 2024 16:33:22 +0100 Subject: [PATCH 23/83] put simd specific operator validation in separate file --- crates/wasmparser/src/validator/operators.rs | 977 +----------------- .../src/validator/operators/simd.rs | 972 +++++++++++++++++ 2 files changed, 977 insertions(+), 972 deletions(-) create mode 100644 crates/wasmparser/src/validator/operators/simd.rs diff --git a/crates/wasmparser/src/validator/operators.rs b/crates/wasmparser/src/validator/operators.rs index 85535a1061..651373edeb 100644 --- a/crates/wasmparser/src/validator/operators.rs +++ b/crates/wasmparser/src/validator/operators.rs @@ -22,6 +22,8 @@ // confusing it's recommended to read over that section to see how it maps to // the various methods here. 
+#[cfg(feature = "simd")] +use crate::VisitSimdOperator; use crate::{ limits::MAX_WASM_FUNCTION_LOCALS, AbstractHeapType, BinaryReaderError, BlockType, BrTable, Catch, ContType, FieldType, FrameKind, FuncType, GlobalType, Handle, HeapType, Ieee32, Ieee64, @@ -29,10 +31,11 @@ use crate::{ TryTable, UnpackedIndex, ValType, VisitOperator, WasmFeatures, WasmModuleResources, }; use crate::{prelude::*, CompositeInnerType, Ordering}; -#[cfg(feature = "simd")] -use crate::{VisitSimdOperator, V128}; use core::ops::{Deref, DerefMut}; +#[cfg(feature = "simd")] +mod simd; + pub(crate) struct OperatorValidator { pub(super) locals: Locals, local_inits: LocalInits, @@ -1681,85 +1684,6 @@ where } } -#[cfg(feature = "simd")] -impl<'resources, R> OperatorValidatorTemp<'_, 'resources, R> -where - R: WasmModuleResources, -{ - fn check_simd_lane_index(&self, index: u8, max: u8) -> Result<()> { - if index >= max { - bail!(self.offset, "SIMD index out of bounds"); - } - Ok(()) - } - - /// Checks a [`V128`] splat operator. - fn check_v128_splat(&mut self, src_ty: ValType) -> Result<()> { - self.pop_operand(Some(src_ty))?; - self.push_operand(ValType::V128)?; - Ok(()) - } - - /// Checks a [`V128`] binary operator. - fn check_v128_binary_op(&mut self) -> Result<()> { - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::V128)?; - Ok(()) - } - - /// Checks a [`V128`] binary float operator. - fn check_v128_fbinary_op(&mut self) -> Result<()> { - self.check_floats_enabled()?; - self.check_v128_binary_op() - } - - /// Checks a [`V128`] unary operator. - fn check_v128_unary_op(&mut self) -> Result<()> { - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::V128)?; - Ok(()) - } - - /// Checks a [`V128`] unary float operator. - fn check_v128_funary_op(&mut self) -> Result<()> { - self.check_floats_enabled()?; - self.check_v128_unary_op() - } - - /// Checks a [`V128`] relaxed ternary operator. 
- fn check_v128_ternary_op(&mut self) -> Result<()> { - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::V128)?; - Ok(()) - } - - /// Checks a [`V128`] test operator. - fn check_v128_bitmask_op(&mut self) -> Result<()> { - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::I32)?; - Ok(()) - } - - /// Checks a [`V128`] shift operator. - fn check_v128_shift_op(&mut self) -> Result<()> { - self.pop_operand(Some(ValType::I32))?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::V128)?; - Ok(()) - } - - /// Checks a [`V128`] common load operator. - fn check_v128_load_op(&mut self, memarg: MemArg) -> Result<()> { - let idx = self.check_memarg(memarg)?; - self.pop_operand(Some(idx))?; - self.push_operand(ValType::V128)?; - Ok(()) - } -} - pub fn ty_to_str(ty: ValType) -> &'static str { match ty { ValType::I32 => "i32", @@ -4282,897 +4206,6 @@ where } } -#[cfg(feature = "simd")] -impl<'a, T> VisitSimdOperator<'a> for OperatorValidatorTemp<'_, '_, T> -where - T: WasmModuleResources, -{ - fn visit_v128_load(&mut self, memarg: MemArg) -> Self::Output { - let ty = self.check_memarg(memarg)?; - self.pop_operand(Some(ty))?; - self.push_operand(ValType::V128)?; - Ok(()) - } - fn visit_v128_store(&mut self, memarg: MemArg) -> Self::Output { - let ty = self.check_memarg(memarg)?; - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(ty))?; - Ok(()) - } - fn visit_v128_const(&mut self, _value: V128) -> Self::Output { - self.push_operand(ValType::V128)?; - Ok(()) - } - fn visit_i8x16_splat(&mut self) -> Self::Output { - self.check_v128_splat(ValType::I32) - } - fn visit_i16x8_splat(&mut self) -> Self::Output { - self.check_v128_splat(ValType::I32) - } - fn visit_i32x4_splat(&mut self) -> Self::Output { - self.check_v128_splat(ValType::I32) - } - fn visit_i64x2_splat(&mut self) -> Self::Output { - self.check_v128_splat(ValType::I64) 
- } - fn visit_f32x4_splat(&mut self) -> Self::Output { - self.check_floats_enabled()?; - self.check_v128_splat(ValType::F32) - } - fn visit_f64x2_splat(&mut self) -> Self::Output { - self.check_floats_enabled()?; - self.check_v128_splat(ValType::F64) - } - fn visit_i8x16_extract_lane_s(&mut self, lane: u8) -> Self::Output { - self.check_simd_lane_index(lane, 16)?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::I32)?; - Ok(()) - } - fn visit_i8x16_extract_lane_u(&mut self, lane: u8) -> Self::Output { - self.visit_i8x16_extract_lane_s(lane) - } - fn visit_i16x8_extract_lane_s(&mut self, lane: u8) -> Self::Output { - self.check_simd_lane_index(lane, 8)?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::I32)?; - Ok(()) - } - fn visit_i16x8_extract_lane_u(&mut self, lane: u8) -> Self::Output { - self.visit_i16x8_extract_lane_s(lane) - } - fn visit_i32x4_extract_lane(&mut self, lane: u8) -> Self::Output { - self.check_simd_lane_index(lane, 4)?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::I32)?; - Ok(()) - } - fn visit_i8x16_replace_lane(&mut self, lane: u8) -> Self::Output { - self.check_simd_lane_index(lane, 16)?; - self.pop_operand(Some(ValType::I32))?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::V128)?; - Ok(()) - } - fn visit_i16x8_replace_lane(&mut self, lane: u8) -> Self::Output { - self.check_simd_lane_index(lane, 8)?; - self.pop_operand(Some(ValType::I32))?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::V128)?; - Ok(()) - } - fn visit_i32x4_replace_lane(&mut self, lane: u8) -> Self::Output { - self.check_simd_lane_index(lane, 4)?; - self.pop_operand(Some(ValType::I32))?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::V128)?; - Ok(()) - } - fn visit_i64x2_extract_lane(&mut self, lane: u8) -> Self::Output { - self.check_simd_lane_index(lane, 2)?; - self.pop_operand(Some(ValType::V128))?; - 
self.push_operand(ValType::I64)?; - Ok(()) - } - fn visit_i64x2_replace_lane(&mut self, lane: u8) -> Self::Output { - self.check_simd_lane_index(lane, 2)?; - self.pop_operand(Some(ValType::I64))?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::V128)?; - Ok(()) - } - fn visit_f32x4_extract_lane(&mut self, lane: u8) -> Self::Output { - self.check_floats_enabled()?; - self.check_simd_lane_index(lane, 4)?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::F32)?; - Ok(()) - } - fn visit_f32x4_replace_lane(&mut self, lane: u8) -> Self::Output { - self.check_floats_enabled()?; - self.check_simd_lane_index(lane, 4)?; - self.pop_operand(Some(ValType::F32))?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::V128)?; - Ok(()) - } - fn visit_f64x2_extract_lane(&mut self, lane: u8) -> Self::Output { - self.check_floats_enabled()?; - self.check_simd_lane_index(lane, 2)?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::F64)?; - Ok(()) - } - fn visit_f64x2_replace_lane(&mut self, lane: u8) -> Self::Output { - self.check_floats_enabled()?; - self.check_simd_lane_index(lane, 2)?; - self.pop_operand(Some(ValType::F64))?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::V128)?; - Ok(()) - } - fn visit_f32x4_eq(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f32x4_ne(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f32x4_lt(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f32x4_gt(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f32x4_le(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f32x4_ge(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f64x2_eq(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f64x2_ne(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f64x2_lt(&mut 
self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f64x2_gt(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f64x2_le(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f64x2_ge(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f32x4_add(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f32x4_sub(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f32x4_mul(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f32x4_div(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f32x4_min(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f32x4_max(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f32x4_pmin(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f32x4_pmax(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f64x2_add(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f64x2_sub(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f64x2_mul(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f64x2_div(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f64x2_min(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f64x2_max(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f64x2_pmin(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_f64x2_pmax(&mut self) -> Self::Output { - self.check_v128_fbinary_op() - } - fn visit_i8x16_eq(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i8x16_ne(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i8x16_lt_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i8x16_lt_u(&mut self) -> Self::Output { - 
self.check_v128_binary_op() - } - fn visit_i8x16_gt_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i8x16_gt_u(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i8x16_le_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i8x16_le_u(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i8x16_ge_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i8x16_ge_u(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_eq(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_ne(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_lt_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_lt_u(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_gt_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_gt_u(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_le_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_le_u(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_ge_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_ge_u(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i32x4_eq(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i32x4_ne(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i32x4_lt_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i32x4_lt_u(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i32x4_gt_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i32x4_gt_u(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i32x4_le_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn 
visit_i32x4_le_u(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i32x4_ge_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i32x4_ge_u(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i64x2_eq(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i64x2_ne(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i64x2_lt_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i64x2_gt_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i64x2_le_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i64x2_ge_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_v128_and(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_v128_andnot(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_v128_or(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_v128_xor(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i8x16_add(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i8x16_add_sat_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i8x16_add_sat_u(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i8x16_sub(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i8x16_sub_sat_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i8x16_sub_sat_u(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i8x16_min_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i8x16_min_u(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i8x16_max_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i8x16_max_u(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_add(&mut self) -> 
Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_add_sat_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_add_sat_u(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_sub(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_sub_sat_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_sub_sat_u(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_mul(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_min_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_min_u(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_max_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_max_u(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i32x4_add(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i32x4_sub(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i32x4_mul(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i32x4_min_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i32x4_min_u(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i32x4_max_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i32x4_max_u(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i32x4_dot_i16x8_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i64x2_add(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i64x2_sub(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i64x2_mul(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i8x16_avgr_u(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_avgr_u(&mut self) -> Self::Output 
{ - self.check_v128_binary_op() - } - fn visit_i8x16_narrow_i16x8_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i8x16_narrow_i16x8_u(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_narrow_i32x4_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_narrow_i32x4_u(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_extmul_low_i8x16_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_extmul_high_i8x16_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_extmul_low_i8x16_u(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_extmul_high_i8x16_u(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i32x4_extmul_low_i16x8_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i32x4_extmul_high_i16x8_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i32x4_extmul_low_i16x8_u(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i32x4_extmul_high_i16x8_u(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i64x2_extmul_low_i32x4_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i64x2_extmul_high_i32x4_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i64x2_extmul_low_i32x4_u(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i64x2_extmul_high_i32x4_u(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_q15mulr_sat_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_f32x4_ceil(&mut self) -> Self::Output { - self.check_v128_funary_op() - } - fn visit_f32x4_floor(&mut self) -> Self::Output { - self.check_v128_funary_op() - } - fn visit_f32x4_trunc(&mut self) -> Self::Output { - self.check_v128_funary_op() - } - fn visit_f32x4_nearest(&mut self) -> 
Self::Output { - self.check_v128_funary_op() - } - fn visit_f64x2_ceil(&mut self) -> Self::Output { - self.check_v128_funary_op() - } - fn visit_f64x2_floor(&mut self) -> Self::Output { - self.check_v128_funary_op() - } - fn visit_f64x2_trunc(&mut self) -> Self::Output { - self.check_v128_funary_op() - } - fn visit_f64x2_nearest(&mut self) -> Self::Output { - self.check_v128_funary_op() - } - fn visit_f32x4_abs(&mut self) -> Self::Output { - self.check_v128_funary_op() - } - fn visit_f32x4_neg(&mut self) -> Self::Output { - self.check_v128_funary_op() - } - fn visit_f32x4_sqrt(&mut self) -> Self::Output { - self.check_v128_funary_op() - } - fn visit_f64x2_abs(&mut self) -> Self::Output { - self.check_v128_funary_op() - } - fn visit_f64x2_neg(&mut self) -> Self::Output { - self.check_v128_funary_op() - } - fn visit_f64x2_sqrt(&mut self) -> Self::Output { - self.check_v128_funary_op() - } - fn visit_f32x4_demote_f64x2_zero(&mut self) -> Self::Output { - self.check_v128_funary_op() - } - fn visit_f64x2_promote_low_f32x4(&mut self) -> Self::Output { - self.check_v128_funary_op() - } - fn visit_f64x2_convert_low_i32x4_s(&mut self) -> Self::Output { - self.check_v128_funary_op() - } - fn visit_f64x2_convert_low_i32x4_u(&mut self) -> Self::Output { - self.check_v128_funary_op() - } - fn visit_i32x4_trunc_sat_f32x4_s(&mut self) -> Self::Output { - self.check_v128_funary_op() - } - fn visit_i32x4_trunc_sat_f32x4_u(&mut self) -> Self::Output { - self.check_v128_funary_op() - } - fn visit_i32x4_trunc_sat_f64x2_s_zero(&mut self) -> Self::Output { - self.check_v128_funary_op() - } - fn visit_i32x4_trunc_sat_f64x2_u_zero(&mut self) -> Self::Output { - self.check_v128_funary_op() - } - fn visit_f32x4_convert_i32x4_s(&mut self) -> Self::Output { - self.check_v128_funary_op() - } - fn visit_f32x4_convert_i32x4_u(&mut self) -> Self::Output { - self.check_v128_funary_op() - } - fn visit_v128_not(&mut self) -> Self::Output { - self.check_v128_unary_op() - } - fn visit_i8x16_abs(&mut 
self) -> Self::Output { - self.check_v128_unary_op() - } - fn visit_i8x16_neg(&mut self) -> Self::Output { - self.check_v128_unary_op() - } - fn visit_i8x16_popcnt(&mut self) -> Self::Output { - self.check_v128_unary_op() - } - fn visit_i16x8_abs(&mut self) -> Self::Output { - self.check_v128_unary_op() - } - fn visit_i16x8_neg(&mut self) -> Self::Output { - self.check_v128_unary_op() - } - fn visit_i32x4_abs(&mut self) -> Self::Output { - self.check_v128_unary_op() - } - fn visit_i32x4_neg(&mut self) -> Self::Output { - self.check_v128_unary_op() - } - fn visit_i64x2_abs(&mut self) -> Self::Output { - self.check_v128_unary_op() - } - fn visit_i64x2_neg(&mut self) -> Self::Output { - self.check_v128_unary_op() - } - fn visit_i16x8_extend_low_i8x16_s(&mut self) -> Self::Output { - self.check_v128_unary_op() - } - fn visit_i16x8_extend_high_i8x16_s(&mut self) -> Self::Output { - self.check_v128_unary_op() - } - fn visit_i16x8_extend_low_i8x16_u(&mut self) -> Self::Output { - self.check_v128_unary_op() - } - fn visit_i16x8_extend_high_i8x16_u(&mut self) -> Self::Output { - self.check_v128_unary_op() - } - fn visit_i32x4_extend_low_i16x8_s(&mut self) -> Self::Output { - self.check_v128_unary_op() - } - fn visit_i32x4_extend_high_i16x8_s(&mut self) -> Self::Output { - self.check_v128_unary_op() - } - fn visit_i32x4_extend_low_i16x8_u(&mut self) -> Self::Output { - self.check_v128_unary_op() - } - fn visit_i32x4_extend_high_i16x8_u(&mut self) -> Self::Output { - self.check_v128_unary_op() - } - fn visit_i64x2_extend_low_i32x4_s(&mut self) -> Self::Output { - self.check_v128_unary_op() - } - fn visit_i64x2_extend_high_i32x4_s(&mut self) -> Self::Output { - self.check_v128_unary_op() - } - fn visit_i64x2_extend_low_i32x4_u(&mut self) -> Self::Output { - self.check_v128_unary_op() - } - fn visit_i64x2_extend_high_i32x4_u(&mut self) -> Self::Output { - self.check_v128_unary_op() - } - fn visit_i16x8_extadd_pairwise_i8x16_s(&mut self) -> Self::Output { - 
self.check_v128_unary_op() - } - fn visit_i16x8_extadd_pairwise_i8x16_u(&mut self) -> Self::Output { - self.check_v128_unary_op() - } - fn visit_i32x4_extadd_pairwise_i16x8_s(&mut self) -> Self::Output { - self.check_v128_unary_op() - } - fn visit_i32x4_extadd_pairwise_i16x8_u(&mut self) -> Self::Output { - self.check_v128_unary_op() - } - fn visit_v128_bitselect(&mut self) -> Self::Output { - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::V128)?; - Ok(()) - } - fn visit_i8x16_relaxed_swizzle(&mut self) -> Self::Output { - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::V128)?; - Ok(()) - } - fn visit_i32x4_relaxed_trunc_f32x4_s(&mut self) -> Self::Output { - self.check_v128_unary_op() - } - fn visit_i32x4_relaxed_trunc_f32x4_u(&mut self) -> Self::Output { - self.check_v128_unary_op() - } - fn visit_i32x4_relaxed_trunc_f64x2_s_zero(&mut self) -> Self::Output { - self.check_v128_unary_op() - } - fn visit_i32x4_relaxed_trunc_f64x2_u_zero(&mut self) -> Self::Output { - self.check_v128_unary_op() - } - fn visit_f32x4_relaxed_madd(&mut self) -> Self::Output { - self.check_v128_ternary_op() - } - fn visit_f32x4_relaxed_nmadd(&mut self) -> Self::Output { - self.check_v128_ternary_op() - } - fn visit_f64x2_relaxed_madd(&mut self) -> Self::Output { - self.check_v128_ternary_op() - } - fn visit_f64x2_relaxed_nmadd(&mut self) -> Self::Output { - self.check_v128_ternary_op() - } - fn visit_i8x16_relaxed_laneselect(&mut self) -> Self::Output { - self.check_v128_ternary_op() - } - fn visit_i16x8_relaxed_laneselect(&mut self) -> Self::Output { - self.check_v128_ternary_op() - } - fn visit_i32x4_relaxed_laneselect(&mut self) -> Self::Output { - self.check_v128_ternary_op() - } - fn visit_i64x2_relaxed_laneselect(&mut self) -> Self::Output { - self.check_v128_ternary_op() - } - fn visit_f32x4_relaxed_min(&mut self) 
-> Self::Output { - self.check_v128_binary_op() - } - fn visit_f32x4_relaxed_max(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_f64x2_relaxed_min(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_f64x2_relaxed_max(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_relaxed_q15mulr_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i16x8_relaxed_dot_i8x16_i7x16_s(&mut self) -> Self::Output { - self.check_v128_binary_op() - } - fn visit_i32x4_relaxed_dot_i8x16_i7x16_add_s(&mut self) -> Self::Output { - self.check_v128_ternary_op() - } - fn visit_v128_any_true(&mut self) -> Self::Output { - self.check_v128_bitmask_op() - } - fn visit_i8x16_all_true(&mut self) -> Self::Output { - self.check_v128_bitmask_op() - } - fn visit_i8x16_bitmask(&mut self) -> Self::Output { - self.check_v128_bitmask_op() - } - fn visit_i16x8_all_true(&mut self) -> Self::Output { - self.check_v128_bitmask_op() - } - fn visit_i16x8_bitmask(&mut self) -> Self::Output { - self.check_v128_bitmask_op() - } - fn visit_i32x4_all_true(&mut self) -> Self::Output { - self.check_v128_bitmask_op() - } - fn visit_i32x4_bitmask(&mut self) -> Self::Output { - self.check_v128_bitmask_op() - } - fn visit_i64x2_all_true(&mut self) -> Self::Output { - self.check_v128_bitmask_op() - } - fn visit_i64x2_bitmask(&mut self) -> Self::Output { - self.check_v128_bitmask_op() - } - fn visit_i8x16_shl(&mut self) -> Self::Output { - self.check_v128_shift_op() - } - fn visit_i8x16_shr_s(&mut self) -> Self::Output { - self.check_v128_shift_op() - } - fn visit_i8x16_shr_u(&mut self) -> Self::Output { - self.check_v128_shift_op() - } - fn visit_i16x8_shl(&mut self) -> Self::Output { - self.check_v128_shift_op() - } - fn visit_i16x8_shr_s(&mut self) -> Self::Output { - self.check_v128_shift_op() - } - fn visit_i16x8_shr_u(&mut self) -> Self::Output { - self.check_v128_shift_op() - } - fn visit_i32x4_shl(&mut self) -> 
Self::Output { - self.check_v128_shift_op() - } - fn visit_i32x4_shr_s(&mut self) -> Self::Output { - self.check_v128_shift_op() - } - fn visit_i32x4_shr_u(&mut self) -> Self::Output { - self.check_v128_shift_op() - } - fn visit_i64x2_shl(&mut self) -> Self::Output { - self.check_v128_shift_op() - } - fn visit_i64x2_shr_s(&mut self) -> Self::Output { - self.check_v128_shift_op() - } - fn visit_i64x2_shr_u(&mut self) -> Self::Output { - self.check_v128_shift_op() - } - fn visit_i8x16_swizzle(&mut self) -> Self::Output { - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(ValType::V128))?; - self.push_operand(ValType::V128)?; - Ok(()) - } - fn visit_i8x16_shuffle(&mut self, lanes: [u8; 16]) -> Self::Output { - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(ValType::V128))?; - for i in lanes { - self.check_simd_lane_index(i, 32)?; - } - self.push_operand(ValType::V128)?; - Ok(()) - } - fn visit_v128_load8_splat(&mut self, memarg: MemArg) -> Self::Output { - let ty = self.check_memarg(memarg)?; - self.pop_operand(Some(ty))?; - self.push_operand(ValType::V128)?; - Ok(()) - } - fn visit_v128_load16_splat(&mut self, memarg: MemArg) -> Self::Output { - let ty = self.check_memarg(memarg)?; - self.pop_operand(Some(ty))?; - self.push_operand(ValType::V128)?; - Ok(()) - } - fn visit_v128_load32_splat(&mut self, memarg: MemArg) -> Self::Output { - let ty = self.check_memarg(memarg)?; - self.pop_operand(Some(ty))?; - self.push_operand(ValType::V128)?; - Ok(()) - } - fn visit_v128_load32_zero(&mut self, memarg: MemArg) -> Self::Output { - self.visit_v128_load32_splat(memarg) - } - fn visit_v128_load64_splat(&mut self, memarg: MemArg) -> Self::Output { - self.check_v128_load_op(memarg) - } - fn visit_v128_load64_zero(&mut self, memarg: MemArg) -> Self::Output { - self.check_v128_load_op(memarg) - } - fn visit_v128_load8x8_s(&mut self, memarg: MemArg) -> Self::Output { - self.check_v128_load_op(memarg) - } - fn visit_v128_load8x8_u(&mut self, memarg: 
MemArg) -> Self::Output { - self.check_v128_load_op(memarg) - } - fn visit_v128_load16x4_s(&mut self, memarg: MemArg) -> Self::Output { - self.check_v128_load_op(memarg) - } - fn visit_v128_load16x4_u(&mut self, memarg: MemArg) -> Self::Output { - self.check_v128_load_op(memarg) - } - fn visit_v128_load32x2_s(&mut self, memarg: MemArg) -> Self::Output { - self.check_v128_load_op(memarg) - } - fn visit_v128_load32x2_u(&mut self, memarg: MemArg) -> Self::Output { - self.check_v128_load_op(memarg) - } - fn visit_v128_load8_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { - let idx = self.check_memarg(memarg)?; - self.check_simd_lane_index(lane, 16)?; - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(idx))?; - self.push_operand(ValType::V128)?; - Ok(()) - } - fn visit_v128_load16_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { - let idx = self.check_memarg(memarg)?; - self.check_simd_lane_index(lane, 8)?; - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(idx))?; - self.push_operand(ValType::V128)?; - Ok(()) - } - fn visit_v128_load32_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { - let idx = self.check_memarg(memarg)?; - self.check_simd_lane_index(lane, 4)?; - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(idx))?; - self.push_operand(ValType::V128)?; - Ok(()) - } - fn visit_v128_load64_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { - let idx = self.check_memarg(memarg)?; - self.check_simd_lane_index(lane, 2)?; - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(idx))?; - self.push_operand(ValType::V128)?; - Ok(()) - } - fn visit_v128_store8_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { - let idx = self.check_memarg(memarg)?; - self.check_simd_lane_index(lane, 16)?; - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(idx))?; - Ok(()) - } - fn visit_v128_store16_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { - let idx = 
self.check_memarg(memarg)?; - self.check_simd_lane_index(lane, 8)?; - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(idx))?; - Ok(()) - } - fn visit_v128_store32_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { - let idx = self.check_memarg(memarg)?; - self.check_simd_lane_index(lane, 4)?; - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(idx))?; - Ok(()) - } - fn visit_v128_store64_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { - let idx = self.check_memarg(memarg)?; - self.check_simd_lane_index(lane, 2)?; - self.pop_operand(Some(ValType::V128))?; - self.pop_operand(Some(idx))?; - Ok(()) - } -} - #[derive(Clone, Debug)] enum Either { A(A), diff --git a/crates/wasmparser/src/validator/operators/simd.rs b/crates/wasmparser/src/validator/operators/simd.rs new file mode 100644 index 0000000000..1d4e78d161 --- /dev/null +++ b/crates/wasmparser/src/validator/operators/simd.rs @@ -0,0 +1,972 @@ +use super::OperatorValidatorTemp; +use crate::{MemArg, Result, ValType, WasmModuleResources}; +#[cfg(feature = "simd")] +use crate::{VisitSimdOperator, V128}; + +impl<'resources, R> OperatorValidatorTemp<'_, 'resources, R> +where + R: WasmModuleResources, +{ + fn check_simd_lane_index(&self, index: u8, max: u8) -> Result<()> { + if index >= max { + bail!(self.offset, "SIMD index out of bounds"); + } + Ok(()) + } + + /// Checks a [`V128`] splat operator. + fn check_v128_splat(&mut self, src_ty: ValType) -> Result<()> { + self.pop_operand(Some(src_ty))?; + self.push_operand(ValType::V128)?; + Ok(()) + } + + /// Checks a [`V128`] binary operator. + fn check_v128_binary_op(&mut self) -> Result<()> { + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::V128)?; + Ok(()) + } + + /// Checks a [`V128`] binary float operator. 
+ fn check_v128_fbinary_op(&mut self) -> Result<()> { + self.check_floats_enabled()?; + self.check_v128_binary_op() + } + + /// Checks a [`V128`] unary operator. + fn check_v128_unary_op(&mut self) -> Result<()> { + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::V128)?; + Ok(()) + } + + /// Checks a [`V128`] unary float operator. + fn check_v128_funary_op(&mut self) -> Result<()> { + self.check_floats_enabled()?; + self.check_v128_unary_op() + } + + /// Checks a [`V128`] relaxed ternary operator. + fn check_v128_ternary_op(&mut self) -> Result<()> { + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::V128)?; + Ok(()) + } + + /// Checks a [`V128`] test operator. + fn check_v128_bitmask_op(&mut self) -> Result<()> { + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::I32)?; + Ok(()) + } + + /// Checks a [`V128`] shift operator. + fn check_v128_shift_op(&mut self) -> Result<()> { + self.pop_operand(Some(ValType::I32))?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::V128)?; + Ok(()) + } + + /// Checks a [`V128`] common load operator. 
+ fn check_v128_load_op(&mut self, memarg: MemArg) -> Result<()> { + let idx = self.check_memarg(memarg)?; + self.pop_operand(Some(idx))?; + self.push_operand(ValType::V128)?; + Ok(()) + } +} + +impl<'a, T> VisitSimdOperator<'a> for OperatorValidatorTemp<'_, '_, T> +where + T: WasmModuleResources, +{ + fn visit_v128_load(&mut self, memarg: MemArg) -> Self::Output { + let ty = self.check_memarg(memarg)?; + self.pop_operand(Some(ty))?; + self.push_operand(ValType::V128)?; + Ok(()) + } + fn visit_v128_store(&mut self, memarg: MemArg) -> Self::Output { + let ty = self.check_memarg(memarg)?; + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(ty))?; + Ok(()) + } + fn visit_v128_const(&mut self, _value: V128) -> Self::Output { + self.push_operand(ValType::V128)?; + Ok(()) + } + fn visit_i8x16_splat(&mut self) -> Self::Output { + self.check_v128_splat(ValType::I32) + } + fn visit_i16x8_splat(&mut self) -> Self::Output { + self.check_v128_splat(ValType::I32) + } + fn visit_i32x4_splat(&mut self) -> Self::Output { + self.check_v128_splat(ValType::I32) + } + fn visit_i64x2_splat(&mut self) -> Self::Output { + self.check_v128_splat(ValType::I64) + } + fn visit_f32x4_splat(&mut self) -> Self::Output { + self.check_floats_enabled()?; + self.check_v128_splat(ValType::F32) + } + fn visit_f64x2_splat(&mut self) -> Self::Output { + self.check_floats_enabled()?; + self.check_v128_splat(ValType::F64) + } + fn visit_i8x16_extract_lane_s(&mut self, lane: u8) -> Self::Output { + self.check_simd_lane_index(lane, 16)?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::I32)?; + Ok(()) + } + fn visit_i8x16_extract_lane_u(&mut self, lane: u8) -> Self::Output { + self.visit_i8x16_extract_lane_s(lane) + } + fn visit_i16x8_extract_lane_s(&mut self, lane: u8) -> Self::Output { + self.check_simd_lane_index(lane, 8)?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::I32)?; + Ok(()) + } + fn visit_i16x8_extract_lane_u(&mut self, lane: u8) -> 
Self::Output { + self.visit_i16x8_extract_lane_s(lane) + } + fn visit_i32x4_extract_lane(&mut self, lane: u8) -> Self::Output { + self.check_simd_lane_index(lane, 4)?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::I32)?; + Ok(()) + } + fn visit_i8x16_replace_lane(&mut self, lane: u8) -> Self::Output { + self.check_simd_lane_index(lane, 16)?; + self.pop_operand(Some(ValType::I32))?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::V128)?; + Ok(()) + } + fn visit_i16x8_replace_lane(&mut self, lane: u8) -> Self::Output { + self.check_simd_lane_index(lane, 8)?; + self.pop_operand(Some(ValType::I32))?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::V128)?; + Ok(()) + } + fn visit_i32x4_replace_lane(&mut self, lane: u8) -> Self::Output { + self.check_simd_lane_index(lane, 4)?; + self.pop_operand(Some(ValType::I32))?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::V128)?; + Ok(()) + } + fn visit_i64x2_extract_lane(&mut self, lane: u8) -> Self::Output { + self.check_simd_lane_index(lane, 2)?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::I64)?; + Ok(()) + } + fn visit_i64x2_replace_lane(&mut self, lane: u8) -> Self::Output { + self.check_simd_lane_index(lane, 2)?; + self.pop_operand(Some(ValType::I64))?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::V128)?; + Ok(()) + } + fn visit_f32x4_extract_lane(&mut self, lane: u8) -> Self::Output { + self.check_floats_enabled()?; + self.check_simd_lane_index(lane, 4)?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::F32)?; + Ok(()) + } + fn visit_f32x4_replace_lane(&mut self, lane: u8) -> Self::Output { + self.check_floats_enabled()?; + self.check_simd_lane_index(lane, 4)?; + self.pop_operand(Some(ValType::F32))?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::V128)?; + Ok(()) + } + fn visit_f64x2_extract_lane(&mut self, lane: u8) -> 
Self::Output { + self.check_floats_enabled()?; + self.check_simd_lane_index(lane, 2)?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::F64)?; + Ok(()) + } + fn visit_f64x2_replace_lane(&mut self, lane: u8) -> Self::Output { + self.check_floats_enabled()?; + self.check_simd_lane_index(lane, 2)?; + self.pop_operand(Some(ValType::F64))?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::V128)?; + Ok(()) + } + fn visit_f32x4_eq(&mut self) -> Self::Output { + self.check_v128_fbinary_op() + } + fn visit_f32x4_ne(&mut self) -> Self::Output { + self.check_v128_fbinary_op() + } + fn visit_f32x4_lt(&mut self) -> Self::Output { + self.check_v128_fbinary_op() + } + fn visit_f32x4_gt(&mut self) -> Self::Output { + self.check_v128_fbinary_op() + } + fn visit_f32x4_le(&mut self) -> Self::Output { + self.check_v128_fbinary_op() + } + fn visit_f32x4_ge(&mut self) -> Self::Output { + self.check_v128_fbinary_op() + } + fn visit_f64x2_eq(&mut self) -> Self::Output { + self.check_v128_fbinary_op() + } + fn visit_f64x2_ne(&mut self) -> Self::Output { + self.check_v128_fbinary_op() + } + fn visit_f64x2_lt(&mut self) -> Self::Output { + self.check_v128_fbinary_op() + } + fn visit_f64x2_gt(&mut self) -> Self::Output { + self.check_v128_fbinary_op() + } + fn visit_f64x2_le(&mut self) -> Self::Output { + self.check_v128_fbinary_op() + } + fn visit_f64x2_ge(&mut self) -> Self::Output { + self.check_v128_fbinary_op() + } + fn visit_f32x4_add(&mut self) -> Self::Output { + self.check_v128_fbinary_op() + } + fn visit_f32x4_sub(&mut self) -> Self::Output { + self.check_v128_fbinary_op() + } + fn visit_f32x4_mul(&mut self) -> Self::Output { + self.check_v128_fbinary_op() + } + fn visit_f32x4_div(&mut self) -> Self::Output { + self.check_v128_fbinary_op() + } + fn visit_f32x4_min(&mut self) -> Self::Output { + self.check_v128_fbinary_op() + } + fn visit_f32x4_max(&mut self) -> Self::Output { + self.check_v128_fbinary_op() + } + fn visit_f32x4_pmin(&mut 
self) -> Self::Output { + self.check_v128_fbinary_op() + } + fn visit_f32x4_pmax(&mut self) -> Self::Output { + self.check_v128_fbinary_op() + } + fn visit_f64x2_add(&mut self) -> Self::Output { + self.check_v128_fbinary_op() + } + fn visit_f64x2_sub(&mut self) -> Self::Output { + self.check_v128_fbinary_op() + } + fn visit_f64x2_mul(&mut self) -> Self::Output { + self.check_v128_fbinary_op() + } + fn visit_f64x2_div(&mut self) -> Self::Output { + self.check_v128_fbinary_op() + } + fn visit_f64x2_min(&mut self) -> Self::Output { + self.check_v128_fbinary_op() + } + fn visit_f64x2_max(&mut self) -> Self::Output { + self.check_v128_fbinary_op() + } + fn visit_f64x2_pmin(&mut self) -> Self::Output { + self.check_v128_fbinary_op() + } + fn visit_f64x2_pmax(&mut self) -> Self::Output { + self.check_v128_fbinary_op() + } + fn visit_i8x16_eq(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i8x16_ne(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i8x16_lt_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i8x16_lt_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i8x16_gt_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i8x16_gt_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i8x16_le_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i8x16_le_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i8x16_ge_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i8x16_ge_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i16x8_eq(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i16x8_ne(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i16x8_lt_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i16x8_lt_u(&mut self) -> Self::Output { + 
self.check_v128_binary_op() + } + fn visit_i16x8_gt_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i16x8_gt_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i16x8_le_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i16x8_le_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i16x8_ge_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i16x8_ge_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i32x4_eq(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i32x4_ne(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i32x4_lt_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i32x4_lt_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i32x4_gt_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i32x4_gt_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i32x4_le_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i32x4_le_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i32x4_ge_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i32x4_ge_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i64x2_eq(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i64x2_ne(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i64x2_lt_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i64x2_gt_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i64x2_le_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i64x2_ge_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_v128_and(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn 
visit_v128_andnot(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_v128_or(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_v128_xor(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i8x16_add(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i8x16_add_sat_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i8x16_add_sat_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i8x16_sub(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i8x16_sub_sat_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i8x16_sub_sat_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i8x16_min_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i8x16_min_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i8x16_max_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i8x16_max_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i16x8_add(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i16x8_add_sat_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i16x8_add_sat_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i16x8_sub(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i16x8_sub_sat_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i16x8_sub_sat_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i16x8_mul(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i16x8_min_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i16x8_min_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i16x8_max_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn 
visit_i16x8_max_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i32x4_add(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i32x4_sub(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i32x4_mul(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i32x4_min_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i32x4_min_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i32x4_max_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i32x4_max_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i32x4_dot_i16x8_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i64x2_add(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i64x2_sub(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i64x2_mul(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i8x16_avgr_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i16x8_avgr_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i8x16_narrow_i16x8_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i8x16_narrow_i16x8_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i16x8_narrow_i32x4_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i16x8_narrow_i32x4_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i16x8_extmul_low_i8x16_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i16x8_extmul_high_i8x16_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i16x8_extmul_low_i8x16_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i16x8_extmul_high_i8x16_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn 
visit_i32x4_extmul_low_i16x8_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i32x4_extmul_high_i16x8_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i32x4_extmul_low_i16x8_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i32x4_extmul_high_i16x8_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i64x2_extmul_low_i32x4_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i64x2_extmul_high_i32x4_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i64x2_extmul_low_i32x4_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i64x2_extmul_high_i32x4_u(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i16x8_q15mulr_sat_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_f32x4_ceil(&mut self) -> Self::Output { + self.check_v128_funary_op() + } + fn visit_f32x4_floor(&mut self) -> Self::Output { + self.check_v128_funary_op() + } + fn visit_f32x4_trunc(&mut self) -> Self::Output { + self.check_v128_funary_op() + } + fn visit_f32x4_nearest(&mut self) -> Self::Output { + self.check_v128_funary_op() + } + fn visit_f64x2_ceil(&mut self) -> Self::Output { + self.check_v128_funary_op() + } + fn visit_f64x2_floor(&mut self) -> Self::Output { + self.check_v128_funary_op() + } + fn visit_f64x2_trunc(&mut self) -> Self::Output { + self.check_v128_funary_op() + } + fn visit_f64x2_nearest(&mut self) -> Self::Output { + self.check_v128_funary_op() + } + fn visit_f32x4_abs(&mut self) -> Self::Output { + self.check_v128_funary_op() + } + fn visit_f32x4_neg(&mut self) -> Self::Output { + self.check_v128_funary_op() + } + fn visit_f32x4_sqrt(&mut self) -> Self::Output { + self.check_v128_funary_op() + } + fn visit_f64x2_abs(&mut self) -> Self::Output { + self.check_v128_funary_op() + } + fn visit_f64x2_neg(&mut self) -> Self::Output { + self.check_v128_funary_op() + } + 
fn visit_f64x2_sqrt(&mut self) -> Self::Output { + self.check_v128_funary_op() + } + fn visit_f32x4_demote_f64x2_zero(&mut self) -> Self::Output { + self.check_v128_funary_op() + } + fn visit_f64x2_promote_low_f32x4(&mut self) -> Self::Output { + self.check_v128_funary_op() + } + fn visit_f64x2_convert_low_i32x4_s(&mut self) -> Self::Output { + self.check_v128_funary_op() + } + fn visit_f64x2_convert_low_i32x4_u(&mut self) -> Self::Output { + self.check_v128_funary_op() + } + fn visit_i32x4_trunc_sat_f32x4_s(&mut self) -> Self::Output { + self.check_v128_funary_op() + } + fn visit_i32x4_trunc_sat_f32x4_u(&mut self) -> Self::Output { + self.check_v128_funary_op() + } + fn visit_i32x4_trunc_sat_f64x2_s_zero(&mut self) -> Self::Output { + self.check_v128_funary_op() + } + fn visit_i32x4_trunc_sat_f64x2_u_zero(&mut self) -> Self::Output { + self.check_v128_funary_op() + } + fn visit_f32x4_convert_i32x4_s(&mut self) -> Self::Output { + self.check_v128_funary_op() + } + fn visit_f32x4_convert_i32x4_u(&mut self) -> Self::Output { + self.check_v128_funary_op() + } + fn visit_v128_not(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i8x16_abs(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i8x16_neg(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i8x16_popcnt(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i16x8_abs(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i16x8_neg(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i32x4_abs(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i32x4_neg(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i64x2_abs(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i64x2_neg(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i16x8_extend_low_i8x16_s(&mut self) -> Self::Output { + self.check_v128_unary_op() 
+ } + fn visit_i16x8_extend_high_i8x16_s(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i16x8_extend_low_i8x16_u(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i16x8_extend_high_i8x16_u(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i32x4_extend_low_i16x8_s(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i32x4_extend_high_i16x8_s(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i32x4_extend_low_i16x8_u(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i32x4_extend_high_i16x8_u(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i64x2_extend_low_i32x4_s(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i64x2_extend_high_i32x4_s(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i64x2_extend_low_i32x4_u(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i64x2_extend_high_i32x4_u(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i16x8_extadd_pairwise_i8x16_s(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i16x8_extadd_pairwise_i8x16_u(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i32x4_extadd_pairwise_i16x8_s(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i32x4_extadd_pairwise_i16x8_u(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_v128_bitselect(&mut self) -> Self::Output { + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::V128)?; + Ok(()) + } + fn visit_i8x16_relaxed_swizzle(&mut self) -> Self::Output { + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::V128)?; + Ok(()) + } + fn visit_i32x4_relaxed_trunc_f32x4_s(&mut self) -> Self::Output { + 
self.check_v128_unary_op() + } + fn visit_i32x4_relaxed_trunc_f32x4_u(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i32x4_relaxed_trunc_f64x2_s_zero(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_i32x4_relaxed_trunc_f64x2_u_zero(&mut self) -> Self::Output { + self.check_v128_unary_op() + } + fn visit_f32x4_relaxed_madd(&mut self) -> Self::Output { + self.check_v128_ternary_op() + } + fn visit_f32x4_relaxed_nmadd(&mut self) -> Self::Output { + self.check_v128_ternary_op() + } + fn visit_f64x2_relaxed_madd(&mut self) -> Self::Output { + self.check_v128_ternary_op() + } + fn visit_f64x2_relaxed_nmadd(&mut self) -> Self::Output { + self.check_v128_ternary_op() + } + fn visit_i8x16_relaxed_laneselect(&mut self) -> Self::Output { + self.check_v128_ternary_op() + } + fn visit_i16x8_relaxed_laneselect(&mut self) -> Self::Output { + self.check_v128_ternary_op() + } + fn visit_i32x4_relaxed_laneselect(&mut self) -> Self::Output { + self.check_v128_ternary_op() + } + fn visit_i64x2_relaxed_laneselect(&mut self) -> Self::Output { + self.check_v128_ternary_op() + } + fn visit_f32x4_relaxed_min(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_f32x4_relaxed_max(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_f64x2_relaxed_min(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_f64x2_relaxed_max(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i16x8_relaxed_q15mulr_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i16x8_relaxed_dot_i8x16_i7x16_s(&mut self) -> Self::Output { + self.check_v128_binary_op() + } + fn visit_i32x4_relaxed_dot_i8x16_i7x16_add_s(&mut self) -> Self::Output { + self.check_v128_ternary_op() + } + fn visit_v128_any_true(&mut self) -> Self::Output { + self.check_v128_bitmask_op() + } + fn visit_i8x16_all_true(&mut self) -> Self::Output { + self.check_v128_bitmask_op() + } + fn 
visit_i8x16_bitmask(&mut self) -> Self::Output { + self.check_v128_bitmask_op() + } + fn visit_i16x8_all_true(&mut self) -> Self::Output { + self.check_v128_bitmask_op() + } + fn visit_i16x8_bitmask(&mut self) -> Self::Output { + self.check_v128_bitmask_op() + } + fn visit_i32x4_all_true(&mut self) -> Self::Output { + self.check_v128_bitmask_op() + } + fn visit_i32x4_bitmask(&mut self) -> Self::Output { + self.check_v128_bitmask_op() + } + fn visit_i64x2_all_true(&mut self) -> Self::Output { + self.check_v128_bitmask_op() + } + fn visit_i64x2_bitmask(&mut self) -> Self::Output { + self.check_v128_bitmask_op() + } + fn visit_i8x16_shl(&mut self) -> Self::Output { + self.check_v128_shift_op() + } + fn visit_i8x16_shr_s(&mut self) -> Self::Output { + self.check_v128_shift_op() + } + fn visit_i8x16_shr_u(&mut self) -> Self::Output { + self.check_v128_shift_op() + } + fn visit_i16x8_shl(&mut self) -> Self::Output { + self.check_v128_shift_op() + } + fn visit_i16x8_shr_s(&mut self) -> Self::Output { + self.check_v128_shift_op() + } + fn visit_i16x8_shr_u(&mut self) -> Self::Output { + self.check_v128_shift_op() + } + fn visit_i32x4_shl(&mut self) -> Self::Output { + self.check_v128_shift_op() + } + fn visit_i32x4_shr_s(&mut self) -> Self::Output { + self.check_v128_shift_op() + } + fn visit_i32x4_shr_u(&mut self) -> Self::Output { + self.check_v128_shift_op() + } + fn visit_i64x2_shl(&mut self) -> Self::Output { + self.check_v128_shift_op() + } + fn visit_i64x2_shr_s(&mut self) -> Self::Output { + self.check_v128_shift_op() + } + fn visit_i64x2_shr_u(&mut self) -> Self::Output { + self.check_v128_shift_op() + } + fn visit_i8x16_swizzle(&mut self) -> Self::Output { + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(ValType::V128))?; + self.push_operand(ValType::V128)?; + Ok(()) + } + fn visit_i8x16_shuffle(&mut self, lanes: [u8; 16]) -> Self::Output { + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(ValType::V128))?; + for i in lanes { + 
self.check_simd_lane_index(i, 32)?; + } + self.push_operand(ValType::V128)?; + Ok(()) + } + fn visit_v128_load8_splat(&mut self, memarg: MemArg) -> Self::Output { + let ty = self.check_memarg(memarg)?; + self.pop_operand(Some(ty))?; + self.push_operand(ValType::V128)?; + Ok(()) + } + fn visit_v128_load16_splat(&mut self, memarg: MemArg) -> Self::Output { + let ty = self.check_memarg(memarg)?; + self.pop_operand(Some(ty))?; + self.push_operand(ValType::V128)?; + Ok(()) + } + fn visit_v128_load32_splat(&mut self, memarg: MemArg) -> Self::Output { + let ty = self.check_memarg(memarg)?; + self.pop_operand(Some(ty))?; + self.push_operand(ValType::V128)?; + Ok(()) + } + fn visit_v128_load32_zero(&mut self, memarg: MemArg) -> Self::Output { + self.visit_v128_load32_splat(memarg) + } + fn visit_v128_load64_splat(&mut self, memarg: MemArg) -> Self::Output { + self.check_v128_load_op(memarg) + } + fn visit_v128_load64_zero(&mut self, memarg: MemArg) -> Self::Output { + self.check_v128_load_op(memarg) + } + fn visit_v128_load8x8_s(&mut self, memarg: MemArg) -> Self::Output { + self.check_v128_load_op(memarg) + } + fn visit_v128_load8x8_u(&mut self, memarg: MemArg) -> Self::Output { + self.check_v128_load_op(memarg) + } + fn visit_v128_load16x4_s(&mut self, memarg: MemArg) -> Self::Output { + self.check_v128_load_op(memarg) + } + fn visit_v128_load16x4_u(&mut self, memarg: MemArg) -> Self::Output { + self.check_v128_load_op(memarg) + } + fn visit_v128_load32x2_s(&mut self, memarg: MemArg) -> Self::Output { + self.check_v128_load_op(memarg) + } + fn visit_v128_load32x2_u(&mut self, memarg: MemArg) -> Self::Output { + self.check_v128_load_op(memarg) + } + fn visit_v128_load8_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { + let idx = self.check_memarg(memarg)?; + self.check_simd_lane_index(lane, 16)?; + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(idx))?; + self.push_operand(ValType::V128)?; + Ok(()) + } + fn visit_v128_load16_lane(&mut self, 
memarg: MemArg, lane: u8) -> Self::Output { + let idx = self.check_memarg(memarg)?; + self.check_simd_lane_index(lane, 8)?; + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(idx))?; + self.push_operand(ValType::V128)?; + Ok(()) + } + fn visit_v128_load32_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { + let idx = self.check_memarg(memarg)?; + self.check_simd_lane_index(lane, 4)?; + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(idx))?; + self.push_operand(ValType::V128)?; + Ok(()) + } + fn visit_v128_load64_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { + let idx = self.check_memarg(memarg)?; + self.check_simd_lane_index(lane, 2)?; + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(idx))?; + self.push_operand(ValType::V128)?; + Ok(()) + } + fn visit_v128_store8_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { + let idx = self.check_memarg(memarg)?; + self.check_simd_lane_index(lane, 16)?; + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(idx))?; + Ok(()) + } + fn visit_v128_store16_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { + let idx = self.check_memarg(memarg)?; + self.check_simd_lane_index(lane, 8)?; + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(idx))?; + Ok(()) + } + fn visit_v128_store32_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { + let idx = self.check_memarg(memarg)?; + self.check_simd_lane_index(lane, 4)?; + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(idx))?; + Ok(()) + } + fn visit_v128_store64_lane(&mut self, memarg: MemArg, lane: u8) -> Self::Output { + let idx = self.check_memarg(memarg)?; + self.check_simd_lane_index(lane, 2)?; + self.pop_operand(Some(ValType::V128))?; + self.pop_operand(Some(idx))?; + Ok(()) + } +} From 28e15df45c97af02ad0c9a256de07d247431ef35 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 19 Nov 2024 16:37:47 +0100 Subject: [PATCH 24/83] add docs to 
for_each_simd_operator macro --- crates/wasmparser/src/lib.rs | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/crates/wasmparser/src/lib.rs b/crates/wasmparser/src/lib.rs index cf1b5f80ad..b4d5024319 100644 --- a/crates/wasmparser/src/lib.rs +++ b/crates/wasmparser/src/lib.rs @@ -60,6 +60,8 @@ mod prelude { /// the [`VisitOperator`] trait if your use case uniformly handles all operators /// the same way. /// +/// Note: SIMD operators are handled by the [`for_each_simd_operator`] macro. +/// /// It is also possible to specialize handling of operators depending on the /// Wasm proposal from which they are originating. /// @@ -77,8 +79,6 @@ mod prelude { /// - `@saturating_float_to_int`: [Wasm `non_trapping_float-to-int-conversions` proposal] /// - `@bulk_memory `:[Wasm `bulk-memory` proposal] /// - `@threads`: [Wasm `threads` proposal] -/// - `@simd`: [Wasm `simd` proposal] -/// - `@relaxed_simd`: [Wasm `relaxed-simd` proposal] /// - `@gc`: [Wasm `gc` proposal] /// - `@stack_switching`: [Wasm `stack-switching` proposal] /// - `@wide_arithmetic`: [Wasm `wide-arithmetic` proposal] @@ -104,12 +104,6 @@ mod prelude { /// [Wasm `threads` proposal]: /// https://github.com/webassembly/threads /// -/// [Wasm `simd` proposal]: -/// https://github.com/webassembly/simd -/// -/// [Wasm `relaxed-simd` proposal]: -/// https://github.com/WebAssembly/relaxed-simd -/// /// [Wasm `gc` proposal]: /// https://github.com/WebAssembly/gc /// @@ -621,7 +615,24 @@ macro_rules! for_each_operator { }; } -/// Docs: TODO +/// A helper macro to conveniently iterate over all opcodes recognized by this +/// crate. This can be used to work with either the [`SimdOperator`] enumeration or +/// the [`VisitSimdOperator`] trait if your use case uniformly handles all operators +/// the same way. 
+/// +/// The list of specializable Wasm proposals is as follows: +/// +/// - `@simd`: [Wasm `simd` proposal] +/// - `@relaxed_simd`: [Wasm `relaxed-simd` proposal] +/// +/// For more information about the structure and use of this macro please +/// refer to the documentation of the [`for_each_operator`] macro. +/// +/// [Wasm `simd` proposal]: +/// https://github.com/webassembly/simd +/// +/// [Wasm `relaxed-simd` proposal]: +/// https://github.com/WebAssembly/relaxed-simd #[macro_export] #[cfg(feature = "simd")] macro_rules! for_each_simd_operator { From faa831712d8e98b4871740674cbcf78950d1a95a Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 19 Nov 2024 16:54:56 +0100 Subject: [PATCH 25/83] move for_each_simd_operator macro into a separate file --- crates/wasmparser/src/arity.rs | 2 + crates/wasmparser/src/binary_reader.rs | 2 + crates/wasmparser/src/for_each_simd_op.rs | 291 ++++++++++++++++++ crates/wasmparser/src/lib.rs | 289 +---------------- .../wasmparser/src/readers/core/operators.rs | 2 + .../src/validator/operators/simd.rs | 1 - 6 files changed, 298 insertions(+), 289 deletions(-) create mode 100644 crates/wasmparser/src/for_each_simd_op.rs diff --git a/crates/wasmparser/src/arity.rs b/crates/wasmparser/src/arity.rs index ebd7da4512..618dd8a6b2 100644 --- a/crates/wasmparser/src/arity.rs +++ b/crates/wasmparser/src/arity.rs @@ -13,6 +13,8 @@ * limitations under the License. */ +#[cfg(feature = "simd")] +use crate::for_each_simd_operator; #[cfg(feature = "simd")] use crate::SimdOperator; use crate::{ diff --git a/crates/wasmparser/src/binary_reader.rs b/crates/wasmparser/src/binary_reader.rs index 2099c4aaaa..0e4d7f6e6e 100644 --- a/crates/wasmparser/src/binary_reader.rs +++ b/crates/wasmparser/src/binary_reader.rs @@ -13,6 +13,8 @@ * limitations under the License. 
*/ +#[cfg(feature = "simd")] +use crate::for_each_simd_operator; use crate::prelude::*; use crate::{limits::*, *}; use core::fmt; diff --git a/crates/wasmparser/src/for_each_simd_op.rs b/crates/wasmparser/src/for_each_simd_op.rs new file mode 100644 index 0000000000..ca883ce6ed --- /dev/null +++ b/crates/wasmparser/src/for_each_simd_op.rs @@ -0,0 +1,291 @@ +/// A helper macro to conveniently iterate over all opcodes recognized by this +/// crate. This can be used to work with either the [`SimdOperator`] enumeration or +/// the [`VisitSimdOperator`] trait if your use case uniformly handles all operators +/// the same way. +/// +/// The list of specializable Wasm proposals is as follows: +/// +/// - `@simd`: [Wasm `simd` proposal] +/// - `@relaxed_simd`: [Wasm `relaxed-simd` proposal] +/// +/// For more information about the structure and use of this macro please +/// refer to the documentation of the [`for_each_operator`] macro. +/// +/// [Wasm `simd` proposal]: +/// https://github.com/webassembly/simd +/// +/// [Wasm `relaxed-simd` proposal]: +/// https://github.com/WebAssembly/relaxed-simd +/// +/// [`SimdOperator`]: crate::SimdOperator +/// [`VisitSimdOperator`]: crate::VisitSimdOperator +#[macro_export] +macro_rules! for_each_simd_operator { + ($mac:ident) => { + $mac! 
{ + // 0xFD operators + // 128-bit SIMD + // - https://github.com/webassembly/simd + // - https://webassembly.github.io/simd/core/binary/instructions.html + @simd V128Load { memarg: $crate::MemArg } => visit_v128_load (load v128) + @simd V128Load8x8S { memarg: $crate::MemArg } => visit_v128_load8x8_s (load v128) + @simd V128Load8x8U { memarg: $crate::MemArg } => visit_v128_load8x8_u (load v128) + @simd V128Load16x4S { memarg: $crate::MemArg } => visit_v128_load16x4_s (load v128) + @simd V128Load16x4U { memarg: $crate::MemArg } => visit_v128_load16x4_u (load v128) + @simd V128Load32x2S { memarg: $crate::MemArg } => visit_v128_load32x2_s (load v128) + @simd V128Load32x2U { memarg: $crate::MemArg } => visit_v128_load32x2_u (load v128) + @simd V128Load8Splat { memarg: $crate::MemArg } => visit_v128_load8_splat (load v128) + @simd V128Load16Splat { memarg: $crate::MemArg } => visit_v128_load16_splat (load v128) + @simd V128Load32Splat { memarg: $crate::MemArg } => visit_v128_load32_splat (load v128) + @simd V128Load64Splat { memarg: $crate::MemArg } => visit_v128_load64_splat (load v128) + @simd V128Load32Zero { memarg: $crate::MemArg } => visit_v128_load32_zero (load v128) + @simd V128Load64Zero { memarg: $crate::MemArg } => visit_v128_load64_zero (load v128) + @simd V128Store { memarg: $crate::MemArg } => visit_v128_store (store v128) + @simd V128Load8Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load8_lane (load lane 16) + @simd V128Load16Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load16_lane (load lane 8) + @simd V128Load32Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load32_lane (load lane 4) + @simd V128Load64Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load64_lane (load lane 2) + @simd V128Store8Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_store8_lane (store lane 16) + @simd V128Store16Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_store16_lane (store lane 8) + @simd V128Store32Lane { memarg: 
$crate::MemArg, lane: u8 } => visit_v128_store32_lane (store lane 4) + @simd V128Store64Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_store64_lane (store lane 2) + @simd V128Const { value: $crate::V128 } => visit_v128_const (push v128) + @simd I8x16Shuffle { lanes: [u8; 16] } => visit_i8x16_shuffle (arity 2 -> 1) + @simd I8x16ExtractLaneS { lane: u8 } => visit_i8x16_extract_lane_s (extract i32 16) + @simd I8x16ExtractLaneU { lane: u8 } => visit_i8x16_extract_lane_u (extract i32 16) + @simd I8x16ReplaceLane { lane: u8 } => visit_i8x16_replace_lane (replace i32 16) + @simd I16x8ExtractLaneS { lane: u8 } => visit_i16x8_extract_lane_s (extract i32 8) + @simd I16x8ExtractLaneU { lane: u8 } => visit_i16x8_extract_lane_u (extract i32 8) + @simd I16x8ReplaceLane { lane: u8 } => visit_i16x8_replace_lane (replace i32 8) + @simd I32x4ExtractLane { lane: u8 } => visit_i32x4_extract_lane (extract i32 4) + @simd I32x4ReplaceLane { lane: u8 } => visit_i32x4_replace_lane (replace i32 4) + @simd I64x2ExtractLane { lane: u8 } => visit_i64x2_extract_lane (extract i64 2) + @simd I64x2ReplaceLane { lane: u8 } => visit_i64x2_replace_lane (replace i64 2) + @simd F32x4ExtractLane { lane: u8 } => visit_f32x4_extract_lane (extract f32 4) + @simd F32x4ReplaceLane { lane: u8 } => visit_f32x4_replace_lane (replace f32 4) + @simd F64x2ExtractLane { lane: u8 } => visit_f64x2_extract_lane (extract f64 2) + @simd F64x2ReplaceLane { lane: u8 } => visit_f64x2_replace_lane (replace f64 2) + @simd I8x16Swizzle => visit_i8x16_swizzle (binary v128) + @simd I8x16Splat => visit_i8x16_splat (splat i32) + @simd I16x8Splat => visit_i16x8_splat (splat i32) + @simd I32x4Splat => visit_i32x4_splat (splat i32) + @simd I64x2Splat => visit_i64x2_splat (splat i64) + @simd F32x4Splat => visit_f32x4_splat (splat f32) + @simd F64x2Splat => visit_f64x2_splat (splat f64) + @simd I8x16Eq => visit_i8x16_eq (binary v128) + @simd I8x16Ne => visit_i8x16_ne (binary v128) + @simd I8x16LtS => visit_i8x16_lt_s (binary 
v128) + @simd I8x16LtU => visit_i8x16_lt_u (binary v128) + @simd I8x16GtS => visit_i8x16_gt_s (binary v128) + @simd I8x16GtU => visit_i8x16_gt_u (binary v128) + @simd I8x16LeS => visit_i8x16_le_s (binary v128) + @simd I8x16LeU => visit_i8x16_le_u (binary v128) + @simd I8x16GeS => visit_i8x16_ge_s (binary v128) + @simd I8x16GeU => visit_i8x16_ge_u (binary v128) + @simd I16x8Eq => visit_i16x8_eq (binary v128) + @simd I16x8Ne => visit_i16x8_ne (binary v128) + @simd I16x8LtS => visit_i16x8_lt_s (binary v128) + @simd I16x8LtU => visit_i16x8_lt_u (binary v128) + @simd I16x8GtS => visit_i16x8_gt_s (binary v128) + @simd I16x8GtU => visit_i16x8_gt_u (binary v128) + @simd I16x8LeS => visit_i16x8_le_s (binary v128) + @simd I16x8LeU => visit_i16x8_le_u (binary v128) + @simd I16x8GeS => visit_i16x8_ge_s (binary v128) + @simd I16x8GeU => visit_i16x8_ge_u (binary v128) + @simd I32x4Eq => visit_i32x4_eq (binary v128) + @simd I32x4Ne => visit_i32x4_ne (binary v128) + @simd I32x4LtS => visit_i32x4_lt_s (binary v128) + @simd I32x4LtU => visit_i32x4_lt_u (binary v128) + @simd I32x4GtS => visit_i32x4_gt_s (binary v128) + @simd I32x4GtU => visit_i32x4_gt_u (binary v128) + @simd I32x4LeS => visit_i32x4_le_s (binary v128) + @simd I32x4LeU => visit_i32x4_le_u (binary v128) + @simd I32x4GeS => visit_i32x4_ge_s (binary v128) + @simd I32x4GeU => visit_i32x4_ge_u (binary v128) + @simd I64x2Eq => visit_i64x2_eq (binary v128) + @simd I64x2Ne => visit_i64x2_ne (binary v128) + @simd I64x2LtS => visit_i64x2_lt_s (binary v128) + @simd I64x2GtS => visit_i64x2_gt_s (binary v128) + @simd I64x2LeS => visit_i64x2_le_s (binary v128) + @simd I64x2GeS => visit_i64x2_ge_s (binary v128) + @simd F32x4Eq => visit_f32x4_eq (binary v128f) + @simd F32x4Ne => visit_f32x4_ne (binary v128f) + @simd F32x4Lt => visit_f32x4_lt (binary v128f) + @simd F32x4Gt => visit_f32x4_gt (binary v128f) + @simd F32x4Le => visit_f32x4_le (binary v128f) + @simd F32x4Ge => visit_f32x4_ge (binary v128f) + @simd F64x2Eq => visit_f64x2_eq 
(binary v128f) + @simd F64x2Ne => visit_f64x2_ne (binary v128f) + @simd F64x2Lt => visit_f64x2_lt (binary v128f) + @simd F64x2Gt => visit_f64x2_gt (binary v128f) + @simd F64x2Le => visit_f64x2_le (binary v128f) + @simd F64x2Ge => visit_f64x2_ge (binary v128f) + @simd V128Not => visit_v128_not (unary v128) + @simd V128And => visit_v128_and (binary v128) + @simd V128AndNot => visit_v128_andnot (binary v128) + @simd V128Or => visit_v128_or (binary v128) + @simd V128Xor => visit_v128_xor (binary v128) + @simd V128Bitselect => visit_v128_bitselect (ternary v128) + @simd V128AnyTrue => visit_v128_any_true (test v128) + @simd I8x16Abs => visit_i8x16_abs (unary v128) + @simd I8x16Neg => visit_i8x16_neg (unary v128) + @simd I8x16Popcnt => visit_i8x16_popcnt (unary v128) + @simd I8x16AllTrue => visit_i8x16_all_true (test v128) + @simd I8x16Bitmask => visit_i8x16_bitmask (test v128) + @simd I8x16NarrowI16x8S => visit_i8x16_narrow_i16x8_s (binary v128) + @simd I8x16NarrowI16x8U => visit_i8x16_narrow_i16x8_u (binary v128) + @simd I8x16Shl => visit_i8x16_shl (shift v128) + @simd I8x16ShrS => visit_i8x16_shr_s (shift v128) + @simd I8x16ShrU => visit_i8x16_shr_u (shift v128) + @simd I8x16Add => visit_i8x16_add (binary v128) + @simd I8x16AddSatS => visit_i8x16_add_sat_s (binary v128) + @simd I8x16AddSatU => visit_i8x16_add_sat_u (binary v128) + @simd I8x16Sub => visit_i8x16_sub (binary v128) + @simd I8x16SubSatS => visit_i8x16_sub_sat_s (binary v128) + @simd I8x16SubSatU => visit_i8x16_sub_sat_u (binary v128) + @simd I8x16MinS => visit_i8x16_min_s (binary v128) + @simd I8x16MinU => visit_i8x16_min_u (binary v128) + @simd I8x16MaxS => visit_i8x16_max_s (binary v128) + @simd I8x16MaxU => visit_i8x16_max_u (binary v128) + @simd I8x16AvgrU => visit_i8x16_avgr_u (binary v128) + @simd I16x8ExtAddPairwiseI8x16S => visit_i16x8_extadd_pairwise_i8x16_s (unary v128) + @simd I16x8ExtAddPairwiseI8x16U => visit_i16x8_extadd_pairwise_i8x16_u (unary v128) + @simd I16x8Abs => visit_i16x8_abs (unary 
v128) + @simd I16x8Neg => visit_i16x8_neg (unary v128) + @simd I16x8Q15MulrSatS => visit_i16x8_q15mulr_sat_s (binary v128) + @simd I16x8AllTrue => visit_i16x8_all_true (test v128) + @simd I16x8Bitmask => visit_i16x8_bitmask (test v128) + @simd I16x8NarrowI32x4S => visit_i16x8_narrow_i32x4_s (binary v128) + @simd I16x8NarrowI32x4U => visit_i16x8_narrow_i32x4_u (binary v128) + @simd I16x8ExtendLowI8x16S => visit_i16x8_extend_low_i8x16_s (unary v128) + @simd I16x8ExtendHighI8x16S => visit_i16x8_extend_high_i8x16_s (unary v128) + @simd I16x8ExtendLowI8x16U => visit_i16x8_extend_low_i8x16_u (unary v128) + @simd I16x8ExtendHighI8x16U => visit_i16x8_extend_high_i8x16_u (unary v128) + @simd I16x8Shl => visit_i16x8_shl (shift v128) + @simd I16x8ShrS => visit_i16x8_shr_s (shift v128) + @simd I16x8ShrU => visit_i16x8_shr_u (shift v128) + @simd I16x8Add => visit_i16x8_add (binary v128) + @simd I16x8AddSatS => visit_i16x8_add_sat_s (binary v128) + @simd I16x8AddSatU => visit_i16x8_add_sat_u (binary v128) + @simd I16x8Sub => visit_i16x8_sub (binary v128) + @simd I16x8SubSatS => visit_i16x8_sub_sat_s (binary v128) + @simd I16x8SubSatU => visit_i16x8_sub_sat_u (binary v128) + @simd I16x8Mul => visit_i16x8_mul (binary v128) + @simd I16x8MinS => visit_i16x8_min_s (binary v128) + @simd I16x8MinU => visit_i16x8_min_u (binary v128) + @simd I16x8MaxS => visit_i16x8_max_s (binary v128) + @simd I16x8MaxU => visit_i16x8_max_u (binary v128) + @simd I16x8AvgrU => visit_i16x8_avgr_u (binary v128) + @simd I16x8ExtMulLowI8x16S => visit_i16x8_extmul_low_i8x16_s (binary v128) + @simd I16x8ExtMulHighI8x16S => visit_i16x8_extmul_high_i8x16_s (binary v128) + @simd I16x8ExtMulLowI8x16U => visit_i16x8_extmul_low_i8x16_u (binary v128) + @simd I16x8ExtMulHighI8x16U => visit_i16x8_extmul_high_i8x16_u (binary v128) + @simd I32x4ExtAddPairwiseI16x8S => visit_i32x4_extadd_pairwise_i16x8_s (unary v128) + @simd I32x4ExtAddPairwiseI16x8U => visit_i32x4_extadd_pairwise_i16x8_u (unary v128) + @simd I32x4Abs => 
visit_i32x4_abs (unary v128) + @simd I32x4Neg => visit_i32x4_neg (unary v128) + @simd I32x4AllTrue => visit_i32x4_all_true (test v128) + @simd I32x4Bitmask => visit_i32x4_bitmask (test v128) + @simd I32x4ExtendLowI16x8S => visit_i32x4_extend_low_i16x8_s (unary v128) + @simd I32x4ExtendHighI16x8S => visit_i32x4_extend_high_i16x8_s (unary v128) + @simd I32x4ExtendLowI16x8U => visit_i32x4_extend_low_i16x8_u (unary v128) + @simd I32x4ExtendHighI16x8U => visit_i32x4_extend_high_i16x8_u (unary v128) + @simd I32x4Shl => visit_i32x4_shl (shift v128) + @simd I32x4ShrS => visit_i32x4_shr_s (shift v128) + @simd I32x4ShrU => visit_i32x4_shr_u (shift v128) + @simd I32x4Add => visit_i32x4_add (binary v128) + @simd I32x4Sub => visit_i32x4_sub (binary v128) + @simd I32x4Mul => visit_i32x4_mul (binary v128) + @simd I32x4MinS => visit_i32x4_min_s (binary v128) + @simd I32x4MinU => visit_i32x4_min_u (binary v128) + @simd I32x4MaxS => visit_i32x4_max_s (binary v128) + @simd I32x4MaxU => visit_i32x4_max_u (binary v128) + @simd I32x4DotI16x8S => visit_i32x4_dot_i16x8_s (binary v128) + @simd I32x4ExtMulLowI16x8S => visit_i32x4_extmul_low_i16x8_s (binary v128) + @simd I32x4ExtMulHighI16x8S => visit_i32x4_extmul_high_i16x8_s (binary v128) + @simd I32x4ExtMulLowI16x8U => visit_i32x4_extmul_low_i16x8_u (binary v128) + @simd I32x4ExtMulHighI16x8U => visit_i32x4_extmul_high_i16x8_u (binary v128) + @simd I64x2Abs => visit_i64x2_abs (unary v128) + @simd I64x2Neg => visit_i64x2_neg (unary v128) + @simd I64x2AllTrue => visit_i64x2_all_true (test v128) + @simd I64x2Bitmask => visit_i64x2_bitmask (test v128) + @simd I64x2ExtendLowI32x4S => visit_i64x2_extend_low_i32x4_s (unary v128) + @simd I64x2ExtendHighI32x4S => visit_i64x2_extend_high_i32x4_s (unary v128) + @simd I64x2ExtendLowI32x4U => visit_i64x2_extend_low_i32x4_u (unary v128) + @simd I64x2ExtendHighI32x4U => visit_i64x2_extend_high_i32x4_u (unary v128) + @simd I64x2Shl => visit_i64x2_shl (shift v128) + @simd I64x2ShrS => visit_i64x2_shr_s 
(shift v128) + @simd I64x2ShrU => visit_i64x2_shr_u (shift v128) + @simd I64x2Add => visit_i64x2_add (binary v128) + @simd I64x2Sub => visit_i64x2_sub (binary v128) + @simd I64x2Mul => visit_i64x2_mul (binary v128) + @simd I64x2ExtMulLowI32x4S => visit_i64x2_extmul_low_i32x4_s (binary v128) + @simd I64x2ExtMulHighI32x4S => visit_i64x2_extmul_high_i32x4_s (binary v128) + @simd I64x2ExtMulLowI32x4U => visit_i64x2_extmul_low_i32x4_u (binary v128) + @simd I64x2ExtMulHighI32x4U => visit_i64x2_extmul_high_i32x4_u (binary v128) + @simd F32x4Ceil => visit_f32x4_ceil (unary v128f) + @simd F32x4Floor => visit_f32x4_floor (unary v128f) + @simd F32x4Trunc => visit_f32x4_trunc (unary v128f) + @simd F32x4Nearest => visit_f32x4_nearest (unary v128f) + @simd F32x4Abs => visit_f32x4_abs (unary v128f) + @simd F32x4Neg => visit_f32x4_neg (unary v128f) + @simd F32x4Sqrt => visit_f32x4_sqrt (unary v128f) + @simd F32x4Add => visit_f32x4_add (binary v128f) + @simd F32x4Sub => visit_f32x4_sub (binary v128f) + @simd F32x4Mul => visit_f32x4_mul (binary v128f) + @simd F32x4Div => visit_f32x4_div (binary v128f) + @simd F32x4Min => visit_f32x4_min (binary v128f) + @simd F32x4Max => visit_f32x4_max (binary v128f) + @simd F32x4PMin => visit_f32x4_pmin (binary v128f) + @simd F32x4PMax => visit_f32x4_pmax (binary v128f) + @simd F64x2Ceil => visit_f64x2_ceil (unary v128f) + @simd F64x2Floor => visit_f64x2_floor (unary v128f) + @simd F64x2Trunc => visit_f64x2_trunc (unary v128f) + @simd F64x2Nearest => visit_f64x2_nearest (unary v128f) + @simd F64x2Abs => visit_f64x2_abs (unary v128f) + @simd F64x2Neg => visit_f64x2_neg (unary v128f) + @simd F64x2Sqrt => visit_f64x2_sqrt (unary v128f) + @simd F64x2Add => visit_f64x2_add (binary v128f) + @simd F64x2Sub => visit_f64x2_sub (binary v128f) + @simd F64x2Mul => visit_f64x2_mul (binary v128f) + @simd F64x2Div => visit_f64x2_div (binary v128f) + @simd F64x2Min => visit_f64x2_min (binary v128f) + @simd F64x2Max => visit_f64x2_max (binary v128f) + @simd 
F64x2PMin => visit_f64x2_pmin (binary v128f) + @simd F64x2PMax => visit_f64x2_pmax (binary v128f) + @simd I32x4TruncSatF32x4S => visit_i32x4_trunc_sat_f32x4_s (unary v128f) + @simd I32x4TruncSatF32x4U => visit_i32x4_trunc_sat_f32x4_u (unary v128f) + @simd F32x4ConvertI32x4S => visit_f32x4_convert_i32x4_s (unary v128f) + @simd F32x4ConvertI32x4U => visit_f32x4_convert_i32x4_u (unary v128f) + @simd I32x4TruncSatF64x2SZero => visit_i32x4_trunc_sat_f64x2_s_zero (unary v128f) + @simd I32x4TruncSatF64x2UZero => visit_i32x4_trunc_sat_f64x2_u_zero (unary v128f) + @simd F64x2ConvertLowI32x4S => visit_f64x2_convert_low_i32x4_s (unary v128f) + @simd F64x2ConvertLowI32x4U => visit_f64x2_convert_low_i32x4_u (unary v128f) + @simd F32x4DemoteF64x2Zero => visit_f32x4_demote_f64x2_zero (unary v128f) + @simd F64x2PromoteLowF32x4 => visit_f64x2_promote_low_f32x4 (unary v128f) + + // Relaxed SIMD operators + // https://github.com/WebAssembly/relaxed-simd + @relaxed_simd I8x16RelaxedSwizzle => visit_i8x16_relaxed_swizzle (binary v128) + @relaxed_simd I32x4RelaxedTruncF32x4S => visit_i32x4_relaxed_trunc_f32x4_s (unary v128) + @relaxed_simd I32x4RelaxedTruncF32x4U => visit_i32x4_relaxed_trunc_f32x4_u (unary v128) + @relaxed_simd I32x4RelaxedTruncF64x2SZero => visit_i32x4_relaxed_trunc_f64x2_s_zero (unary v128) + @relaxed_simd I32x4RelaxedTruncF64x2UZero => visit_i32x4_relaxed_trunc_f64x2_u_zero (unary v128) + @relaxed_simd F32x4RelaxedMadd => visit_f32x4_relaxed_madd (ternary v128) + @relaxed_simd F32x4RelaxedNmadd => visit_f32x4_relaxed_nmadd (ternary v128) + @relaxed_simd F64x2RelaxedMadd => visit_f64x2_relaxed_madd (ternary v128) + @relaxed_simd F64x2RelaxedNmadd => visit_f64x2_relaxed_nmadd (ternary v128) + @relaxed_simd I8x16RelaxedLaneselect => visit_i8x16_relaxed_laneselect (ternary v128) + @relaxed_simd I16x8RelaxedLaneselect => visit_i16x8_relaxed_laneselect (ternary v128) + @relaxed_simd I32x4RelaxedLaneselect => visit_i32x4_relaxed_laneselect (ternary v128) + @relaxed_simd 
I64x2RelaxedLaneselect => visit_i64x2_relaxed_laneselect (ternary v128) + @relaxed_simd F32x4RelaxedMin => visit_f32x4_relaxed_min (binary v128) + @relaxed_simd F32x4RelaxedMax => visit_f32x4_relaxed_max (binary v128) + @relaxed_simd F64x2RelaxedMin => visit_f64x2_relaxed_min (binary v128) + @relaxed_simd F64x2RelaxedMax => visit_f64x2_relaxed_max (binary v128) + @relaxed_simd I16x8RelaxedQ15mulrS => visit_i16x8_relaxed_q15mulr_s (binary v128) + @relaxed_simd I16x8RelaxedDotI8x16I7x16S => visit_i16x8_relaxed_dot_i8x16_i7x16_s (binary v128) + @relaxed_simd I32x4RelaxedDotI8x16I7x16AddS => visit_i32x4_relaxed_dot_i8x16_i7x16_add_s (ternary v128) + } + }; +} diff --git a/crates/wasmparser/src/lib.rs b/crates/wasmparser/src/lib.rs index b4d5024319..50eea01a2f 100644 --- a/crates/wasmparser/src/lib.rs +++ b/crates/wasmparser/src/lib.rs @@ -615,295 +615,8 @@ macro_rules! for_each_operator { }; } -/// A helper macro to conveniently iterate over all opcodes recognized by this -/// crate. This can be used to work with either the [`SimdOperator`] enumeration or -/// the [`VisitSimdOperator`] trait if your use case uniformly handles all operators -/// the same way. -/// -/// The list of specializable Wasm proposals is as follows: -/// -/// - `@simd`: [Wasm `simd` proposal] -/// - `@relaxed_simd`: [Wasm `relaxed-simd` proposal] -/// -/// For more information about the structure and use of this macro please -/// refer to the documentation of the [`for_each_operator`] macro. -/// -/// [Wasm `simd` proposal]: -/// https://github.com/webassembly/simd -/// -/// [Wasm `relaxed-simd` proposal]: -/// https://github.com/WebAssembly/relaxed-simd -#[macro_export] #[cfg(feature = "simd")] -macro_rules! for_each_simd_operator { - ($mac:ident) => { - $mac! 
{ - // 0xFD operators - // 128-bit SIMD - // - https://github.com/webassembly/simd - // - https://webassembly.github.io/simd/core/binary/instructions.html - @simd V128Load { memarg: $crate::MemArg } => visit_v128_load (load v128) - @simd V128Load8x8S { memarg: $crate::MemArg } => visit_v128_load8x8_s (load v128) - @simd V128Load8x8U { memarg: $crate::MemArg } => visit_v128_load8x8_u (load v128) - @simd V128Load16x4S { memarg: $crate::MemArg } => visit_v128_load16x4_s (load v128) - @simd V128Load16x4U { memarg: $crate::MemArg } => visit_v128_load16x4_u (load v128) - @simd V128Load32x2S { memarg: $crate::MemArg } => visit_v128_load32x2_s (load v128) - @simd V128Load32x2U { memarg: $crate::MemArg } => visit_v128_load32x2_u (load v128) - @simd V128Load8Splat { memarg: $crate::MemArg } => visit_v128_load8_splat (load v128) - @simd V128Load16Splat { memarg: $crate::MemArg } => visit_v128_load16_splat (load v128) - @simd V128Load32Splat { memarg: $crate::MemArg } => visit_v128_load32_splat (load v128) - @simd V128Load64Splat { memarg: $crate::MemArg } => visit_v128_load64_splat (load v128) - @simd V128Load32Zero { memarg: $crate::MemArg } => visit_v128_load32_zero (load v128) - @simd V128Load64Zero { memarg: $crate::MemArg } => visit_v128_load64_zero (load v128) - @simd V128Store { memarg: $crate::MemArg } => visit_v128_store (store v128) - @simd V128Load8Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load8_lane (load lane 16) - @simd V128Load16Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load16_lane (load lane 8) - @simd V128Load32Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load32_lane (load lane 4) - @simd V128Load64Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load64_lane (load lane 2) - @simd V128Store8Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_store8_lane (store lane 16) - @simd V128Store16Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_store16_lane (store lane 8) - @simd V128Store32Lane { memarg: 
$crate::MemArg, lane: u8 } => visit_v128_store32_lane (store lane 4) - @simd V128Store64Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_store64_lane (store lane 2) - @simd V128Const { value: $crate::V128 } => visit_v128_const (push v128) - @simd I8x16Shuffle { lanes: [u8; 16] } => visit_i8x16_shuffle (arity 2 -> 1) - @simd I8x16ExtractLaneS { lane: u8 } => visit_i8x16_extract_lane_s (extract i32 16) - @simd I8x16ExtractLaneU { lane: u8 } => visit_i8x16_extract_lane_u (extract i32 16) - @simd I8x16ReplaceLane { lane: u8 } => visit_i8x16_replace_lane (replace i32 16) - @simd I16x8ExtractLaneS { lane: u8 } => visit_i16x8_extract_lane_s (extract i32 8) - @simd I16x8ExtractLaneU { lane: u8 } => visit_i16x8_extract_lane_u (extract i32 8) - @simd I16x8ReplaceLane { lane: u8 } => visit_i16x8_replace_lane (replace i32 8) - @simd I32x4ExtractLane { lane: u8 } => visit_i32x4_extract_lane (extract i32 4) - @simd I32x4ReplaceLane { lane: u8 } => visit_i32x4_replace_lane (replace i32 4) - @simd I64x2ExtractLane { lane: u8 } => visit_i64x2_extract_lane (extract i64 2) - @simd I64x2ReplaceLane { lane: u8 } => visit_i64x2_replace_lane (replace i64 2) - @simd F32x4ExtractLane { lane: u8 } => visit_f32x4_extract_lane (extract f32 4) - @simd F32x4ReplaceLane { lane: u8 } => visit_f32x4_replace_lane (replace f32 4) - @simd F64x2ExtractLane { lane: u8 } => visit_f64x2_extract_lane (extract f64 2) - @simd F64x2ReplaceLane { lane: u8 } => visit_f64x2_replace_lane (replace f64 2) - @simd I8x16Swizzle => visit_i8x16_swizzle (binary v128) - @simd I8x16Splat => visit_i8x16_splat (splat i32) - @simd I16x8Splat => visit_i16x8_splat (splat i32) - @simd I32x4Splat => visit_i32x4_splat (splat i32) - @simd I64x2Splat => visit_i64x2_splat (splat i64) - @simd F32x4Splat => visit_f32x4_splat (splat f32) - @simd F64x2Splat => visit_f64x2_splat (splat f64) - @simd I8x16Eq => visit_i8x16_eq (binary v128) - @simd I8x16Ne => visit_i8x16_ne (binary v128) - @simd I8x16LtS => visit_i8x16_lt_s (binary 
v128) - @simd I8x16LtU => visit_i8x16_lt_u (binary v128) - @simd I8x16GtS => visit_i8x16_gt_s (binary v128) - @simd I8x16GtU => visit_i8x16_gt_u (binary v128) - @simd I8x16LeS => visit_i8x16_le_s (binary v128) - @simd I8x16LeU => visit_i8x16_le_u (binary v128) - @simd I8x16GeS => visit_i8x16_ge_s (binary v128) - @simd I8x16GeU => visit_i8x16_ge_u (binary v128) - @simd I16x8Eq => visit_i16x8_eq (binary v128) - @simd I16x8Ne => visit_i16x8_ne (binary v128) - @simd I16x8LtS => visit_i16x8_lt_s (binary v128) - @simd I16x8LtU => visit_i16x8_lt_u (binary v128) - @simd I16x8GtS => visit_i16x8_gt_s (binary v128) - @simd I16x8GtU => visit_i16x8_gt_u (binary v128) - @simd I16x8LeS => visit_i16x8_le_s (binary v128) - @simd I16x8LeU => visit_i16x8_le_u (binary v128) - @simd I16x8GeS => visit_i16x8_ge_s (binary v128) - @simd I16x8GeU => visit_i16x8_ge_u (binary v128) - @simd I32x4Eq => visit_i32x4_eq (binary v128) - @simd I32x4Ne => visit_i32x4_ne (binary v128) - @simd I32x4LtS => visit_i32x4_lt_s (binary v128) - @simd I32x4LtU => visit_i32x4_lt_u (binary v128) - @simd I32x4GtS => visit_i32x4_gt_s (binary v128) - @simd I32x4GtU => visit_i32x4_gt_u (binary v128) - @simd I32x4LeS => visit_i32x4_le_s (binary v128) - @simd I32x4LeU => visit_i32x4_le_u (binary v128) - @simd I32x4GeS => visit_i32x4_ge_s (binary v128) - @simd I32x4GeU => visit_i32x4_ge_u (binary v128) - @simd I64x2Eq => visit_i64x2_eq (binary v128) - @simd I64x2Ne => visit_i64x2_ne (binary v128) - @simd I64x2LtS => visit_i64x2_lt_s (binary v128) - @simd I64x2GtS => visit_i64x2_gt_s (binary v128) - @simd I64x2LeS => visit_i64x2_le_s (binary v128) - @simd I64x2GeS => visit_i64x2_ge_s (binary v128) - @simd F32x4Eq => visit_f32x4_eq (binary v128f) - @simd F32x4Ne => visit_f32x4_ne (binary v128f) - @simd F32x4Lt => visit_f32x4_lt (binary v128f) - @simd F32x4Gt => visit_f32x4_gt (binary v128f) - @simd F32x4Le => visit_f32x4_le (binary v128f) - @simd F32x4Ge => visit_f32x4_ge (binary v128f) - @simd F64x2Eq => visit_f64x2_eq 
(binary v128f) - @simd F64x2Ne => visit_f64x2_ne (binary v128f) - @simd F64x2Lt => visit_f64x2_lt (binary v128f) - @simd F64x2Gt => visit_f64x2_gt (binary v128f) - @simd F64x2Le => visit_f64x2_le (binary v128f) - @simd F64x2Ge => visit_f64x2_ge (binary v128f) - @simd V128Not => visit_v128_not (unary v128) - @simd V128And => visit_v128_and (binary v128) - @simd V128AndNot => visit_v128_andnot (binary v128) - @simd V128Or => visit_v128_or (binary v128) - @simd V128Xor => visit_v128_xor (binary v128) - @simd V128Bitselect => visit_v128_bitselect (ternary v128) - @simd V128AnyTrue => visit_v128_any_true (test v128) - @simd I8x16Abs => visit_i8x16_abs (unary v128) - @simd I8x16Neg => visit_i8x16_neg (unary v128) - @simd I8x16Popcnt => visit_i8x16_popcnt (unary v128) - @simd I8x16AllTrue => visit_i8x16_all_true (test v128) - @simd I8x16Bitmask => visit_i8x16_bitmask (test v128) - @simd I8x16NarrowI16x8S => visit_i8x16_narrow_i16x8_s (binary v128) - @simd I8x16NarrowI16x8U => visit_i8x16_narrow_i16x8_u (binary v128) - @simd I8x16Shl => visit_i8x16_shl (shift v128) - @simd I8x16ShrS => visit_i8x16_shr_s (shift v128) - @simd I8x16ShrU => visit_i8x16_shr_u (shift v128) - @simd I8x16Add => visit_i8x16_add (binary v128) - @simd I8x16AddSatS => visit_i8x16_add_sat_s (binary v128) - @simd I8x16AddSatU => visit_i8x16_add_sat_u (binary v128) - @simd I8x16Sub => visit_i8x16_sub (binary v128) - @simd I8x16SubSatS => visit_i8x16_sub_sat_s (binary v128) - @simd I8x16SubSatU => visit_i8x16_sub_sat_u (binary v128) - @simd I8x16MinS => visit_i8x16_min_s (binary v128) - @simd I8x16MinU => visit_i8x16_min_u (binary v128) - @simd I8x16MaxS => visit_i8x16_max_s (binary v128) - @simd I8x16MaxU => visit_i8x16_max_u (binary v128) - @simd I8x16AvgrU => visit_i8x16_avgr_u (binary v128) - @simd I16x8ExtAddPairwiseI8x16S => visit_i16x8_extadd_pairwise_i8x16_s (unary v128) - @simd I16x8ExtAddPairwiseI8x16U => visit_i16x8_extadd_pairwise_i8x16_u (unary v128) - @simd I16x8Abs => visit_i16x8_abs (unary 
v128) - @simd I16x8Neg => visit_i16x8_neg (unary v128) - @simd I16x8Q15MulrSatS => visit_i16x8_q15mulr_sat_s (binary v128) - @simd I16x8AllTrue => visit_i16x8_all_true (test v128) - @simd I16x8Bitmask => visit_i16x8_bitmask (test v128) - @simd I16x8NarrowI32x4S => visit_i16x8_narrow_i32x4_s (binary v128) - @simd I16x8NarrowI32x4U => visit_i16x8_narrow_i32x4_u (binary v128) - @simd I16x8ExtendLowI8x16S => visit_i16x8_extend_low_i8x16_s (unary v128) - @simd I16x8ExtendHighI8x16S => visit_i16x8_extend_high_i8x16_s (unary v128) - @simd I16x8ExtendLowI8x16U => visit_i16x8_extend_low_i8x16_u (unary v128) - @simd I16x8ExtendHighI8x16U => visit_i16x8_extend_high_i8x16_u (unary v128) - @simd I16x8Shl => visit_i16x8_shl (shift v128) - @simd I16x8ShrS => visit_i16x8_shr_s (shift v128) - @simd I16x8ShrU => visit_i16x8_shr_u (shift v128) - @simd I16x8Add => visit_i16x8_add (binary v128) - @simd I16x8AddSatS => visit_i16x8_add_sat_s (binary v128) - @simd I16x8AddSatU => visit_i16x8_add_sat_u (binary v128) - @simd I16x8Sub => visit_i16x8_sub (binary v128) - @simd I16x8SubSatS => visit_i16x8_sub_sat_s (binary v128) - @simd I16x8SubSatU => visit_i16x8_sub_sat_u (binary v128) - @simd I16x8Mul => visit_i16x8_mul (binary v128) - @simd I16x8MinS => visit_i16x8_min_s (binary v128) - @simd I16x8MinU => visit_i16x8_min_u (binary v128) - @simd I16x8MaxS => visit_i16x8_max_s (binary v128) - @simd I16x8MaxU => visit_i16x8_max_u (binary v128) - @simd I16x8AvgrU => visit_i16x8_avgr_u (binary v128) - @simd I16x8ExtMulLowI8x16S => visit_i16x8_extmul_low_i8x16_s (binary v128) - @simd I16x8ExtMulHighI8x16S => visit_i16x8_extmul_high_i8x16_s (binary v128) - @simd I16x8ExtMulLowI8x16U => visit_i16x8_extmul_low_i8x16_u (binary v128) - @simd I16x8ExtMulHighI8x16U => visit_i16x8_extmul_high_i8x16_u (binary v128) - @simd I32x4ExtAddPairwiseI16x8S => visit_i32x4_extadd_pairwise_i16x8_s (unary v128) - @simd I32x4ExtAddPairwiseI16x8U => visit_i32x4_extadd_pairwise_i16x8_u (unary v128) - @simd I32x4Abs => 
visit_i32x4_abs (unary v128) - @simd I32x4Neg => visit_i32x4_neg (unary v128) - @simd I32x4AllTrue => visit_i32x4_all_true (test v128) - @simd I32x4Bitmask => visit_i32x4_bitmask (test v128) - @simd I32x4ExtendLowI16x8S => visit_i32x4_extend_low_i16x8_s (unary v128) - @simd I32x4ExtendHighI16x8S => visit_i32x4_extend_high_i16x8_s (unary v128) - @simd I32x4ExtendLowI16x8U => visit_i32x4_extend_low_i16x8_u (unary v128) - @simd I32x4ExtendHighI16x8U => visit_i32x4_extend_high_i16x8_u (unary v128) - @simd I32x4Shl => visit_i32x4_shl (shift v128) - @simd I32x4ShrS => visit_i32x4_shr_s (shift v128) - @simd I32x4ShrU => visit_i32x4_shr_u (shift v128) - @simd I32x4Add => visit_i32x4_add (binary v128) - @simd I32x4Sub => visit_i32x4_sub (binary v128) - @simd I32x4Mul => visit_i32x4_mul (binary v128) - @simd I32x4MinS => visit_i32x4_min_s (binary v128) - @simd I32x4MinU => visit_i32x4_min_u (binary v128) - @simd I32x4MaxS => visit_i32x4_max_s (binary v128) - @simd I32x4MaxU => visit_i32x4_max_u (binary v128) - @simd I32x4DotI16x8S => visit_i32x4_dot_i16x8_s (binary v128) - @simd I32x4ExtMulLowI16x8S => visit_i32x4_extmul_low_i16x8_s (binary v128) - @simd I32x4ExtMulHighI16x8S => visit_i32x4_extmul_high_i16x8_s (binary v128) - @simd I32x4ExtMulLowI16x8U => visit_i32x4_extmul_low_i16x8_u (binary v128) - @simd I32x4ExtMulHighI16x8U => visit_i32x4_extmul_high_i16x8_u (binary v128) - @simd I64x2Abs => visit_i64x2_abs (unary v128) - @simd I64x2Neg => visit_i64x2_neg (unary v128) - @simd I64x2AllTrue => visit_i64x2_all_true (test v128) - @simd I64x2Bitmask => visit_i64x2_bitmask (test v128) - @simd I64x2ExtendLowI32x4S => visit_i64x2_extend_low_i32x4_s (unary v128) - @simd I64x2ExtendHighI32x4S => visit_i64x2_extend_high_i32x4_s (unary v128) - @simd I64x2ExtendLowI32x4U => visit_i64x2_extend_low_i32x4_u (unary v128) - @simd I64x2ExtendHighI32x4U => visit_i64x2_extend_high_i32x4_u (unary v128) - @simd I64x2Shl => visit_i64x2_shl (shift v128) - @simd I64x2ShrS => visit_i64x2_shr_s 
(shift v128) - @simd I64x2ShrU => visit_i64x2_shr_u (shift v128) - @simd I64x2Add => visit_i64x2_add (binary v128) - @simd I64x2Sub => visit_i64x2_sub (binary v128) - @simd I64x2Mul => visit_i64x2_mul (binary v128) - @simd I64x2ExtMulLowI32x4S => visit_i64x2_extmul_low_i32x4_s (binary v128) - @simd I64x2ExtMulHighI32x4S => visit_i64x2_extmul_high_i32x4_s (binary v128) - @simd I64x2ExtMulLowI32x4U => visit_i64x2_extmul_low_i32x4_u (binary v128) - @simd I64x2ExtMulHighI32x4U => visit_i64x2_extmul_high_i32x4_u (binary v128) - @simd F32x4Ceil => visit_f32x4_ceil (unary v128f) - @simd F32x4Floor => visit_f32x4_floor (unary v128f) - @simd F32x4Trunc => visit_f32x4_trunc (unary v128f) - @simd F32x4Nearest => visit_f32x4_nearest (unary v128f) - @simd F32x4Abs => visit_f32x4_abs (unary v128f) - @simd F32x4Neg => visit_f32x4_neg (unary v128f) - @simd F32x4Sqrt => visit_f32x4_sqrt (unary v128f) - @simd F32x4Add => visit_f32x4_add (binary v128f) - @simd F32x4Sub => visit_f32x4_sub (binary v128f) - @simd F32x4Mul => visit_f32x4_mul (binary v128f) - @simd F32x4Div => visit_f32x4_div (binary v128f) - @simd F32x4Min => visit_f32x4_min (binary v128f) - @simd F32x4Max => visit_f32x4_max (binary v128f) - @simd F32x4PMin => visit_f32x4_pmin (binary v128f) - @simd F32x4PMax => visit_f32x4_pmax (binary v128f) - @simd F64x2Ceil => visit_f64x2_ceil (unary v128f) - @simd F64x2Floor => visit_f64x2_floor (unary v128f) - @simd F64x2Trunc => visit_f64x2_trunc (unary v128f) - @simd F64x2Nearest => visit_f64x2_nearest (unary v128f) - @simd F64x2Abs => visit_f64x2_abs (unary v128f) - @simd F64x2Neg => visit_f64x2_neg (unary v128f) - @simd F64x2Sqrt => visit_f64x2_sqrt (unary v128f) - @simd F64x2Add => visit_f64x2_add (binary v128f) - @simd F64x2Sub => visit_f64x2_sub (binary v128f) - @simd F64x2Mul => visit_f64x2_mul (binary v128f) - @simd F64x2Div => visit_f64x2_div (binary v128f) - @simd F64x2Min => visit_f64x2_min (binary v128f) - @simd F64x2Max => visit_f64x2_max (binary v128f) - @simd 
F64x2PMin => visit_f64x2_pmin (binary v128f) - @simd F64x2PMax => visit_f64x2_pmax (binary v128f) - @simd I32x4TruncSatF32x4S => visit_i32x4_trunc_sat_f32x4_s (unary v128f) - @simd I32x4TruncSatF32x4U => visit_i32x4_trunc_sat_f32x4_u (unary v128f) - @simd F32x4ConvertI32x4S => visit_f32x4_convert_i32x4_s (unary v128f) - @simd F32x4ConvertI32x4U => visit_f32x4_convert_i32x4_u (unary v128f) - @simd I32x4TruncSatF64x2SZero => visit_i32x4_trunc_sat_f64x2_s_zero (unary v128f) - @simd I32x4TruncSatF64x2UZero => visit_i32x4_trunc_sat_f64x2_u_zero (unary v128f) - @simd F64x2ConvertLowI32x4S => visit_f64x2_convert_low_i32x4_s (unary v128f) - @simd F64x2ConvertLowI32x4U => visit_f64x2_convert_low_i32x4_u (unary v128f) - @simd F32x4DemoteF64x2Zero => visit_f32x4_demote_f64x2_zero (unary v128f) - @simd F64x2PromoteLowF32x4 => visit_f64x2_promote_low_f32x4 (unary v128f) - - // Relaxed SIMD operators - // https://github.com/WebAssembly/relaxed-simd - @relaxed_simd I8x16RelaxedSwizzle => visit_i8x16_relaxed_swizzle (binary v128) - @relaxed_simd I32x4RelaxedTruncF32x4S => visit_i32x4_relaxed_trunc_f32x4_s (unary v128) - @relaxed_simd I32x4RelaxedTruncF32x4U => visit_i32x4_relaxed_trunc_f32x4_u (unary v128) - @relaxed_simd I32x4RelaxedTruncF64x2SZero => visit_i32x4_relaxed_trunc_f64x2_s_zero (unary v128) - @relaxed_simd I32x4RelaxedTruncF64x2UZero => visit_i32x4_relaxed_trunc_f64x2_u_zero (unary v128) - @relaxed_simd F32x4RelaxedMadd => visit_f32x4_relaxed_madd (ternary v128) - @relaxed_simd F32x4RelaxedNmadd => visit_f32x4_relaxed_nmadd (ternary v128) - @relaxed_simd F64x2RelaxedMadd => visit_f64x2_relaxed_madd (ternary v128) - @relaxed_simd F64x2RelaxedNmadd => visit_f64x2_relaxed_nmadd (ternary v128) - @relaxed_simd I8x16RelaxedLaneselect => visit_i8x16_relaxed_laneselect (ternary v128) - @relaxed_simd I16x8RelaxedLaneselect => visit_i16x8_relaxed_laneselect (ternary v128) - @relaxed_simd I32x4RelaxedLaneselect => visit_i32x4_relaxed_laneselect (ternary v128) - @relaxed_simd 
I64x2RelaxedLaneselect => visit_i64x2_relaxed_laneselect (ternary v128) - @relaxed_simd F32x4RelaxedMin => visit_f32x4_relaxed_min (binary v128) - @relaxed_simd F32x4RelaxedMax => visit_f32x4_relaxed_max (binary v128) - @relaxed_simd F64x2RelaxedMin => visit_f64x2_relaxed_min (binary v128) - @relaxed_simd F64x2RelaxedMax => visit_f64x2_relaxed_max (binary v128) - @relaxed_simd I16x8RelaxedQ15mulrS => visit_i16x8_relaxed_q15mulr_s (binary v128) - @relaxed_simd I16x8RelaxedDotI8x16I7x16S => visit_i16x8_relaxed_dot_i8x16_i7x16_s (binary v128) - @relaxed_simd I32x4RelaxedDotI8x16I7x16AddS => visit_i32x4_relaxed_dot_i8x16_i7x16_add_s (ternary v128) - } - }; -} +mod for_each_simd_op; macro_rules! format_err { ($offset:expr, $($arg:tt)*) => { diff --git a/crates/wasmparser/src/readers/core/operators.rs b/crates/wasmparser/src/readers/core/operators.rs index f25f57bc58..6b91282b4f 100644 --- a/crates/wasmparser/src/readers/core/operators.rs +++ b/crates/wasmparser/src/readers/core/operators.rs @@ -13,6 +13,8 @@ * limitations under the License. 
*/ +#[cfg(feature = "simd")] +use crate::for_each_simd_operator; use crate::limits::{MAX_WASM_CATCHES, MAX_WASM_HANDLERS}; use crate::prelude::*; use crate::{BinaryReader, BinaryReaderError, FromReader, Result, ValType}; diff --git a/crates/wasmparser/src/validator/operators/simd.rs b/crates/wasmparser/src/validator/operators/simd.rs index 1d4e78d161..b52015a259 100644 --- a/crates/wasmparser/src/validator/operators/simd.rs +++ b/crates/wasmparser/src/validator/operators/simd.rs @@ -1,6 +1,5 @@ use super::OperatorValidatorTemp; use crate::{MemArg, Result, ValType, WasmModuleResources}; -#[cfg(feature = "simd")] use crate::{VisitSimdOperator, V128}; impl<'resources, R> OperatorValidatorTemp<'_, 'resources, R> From 6eef1162aa41c05aa6a9056ca55c6f38bebd5913 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 19 Nov 2024 17:01:12 +0100 Subject: [PATCH 26/83] fix docs for VisitSimdOperator --- crates/wasmparser/src/readers/core/operators.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/wasmparser/src/readers/core/operators.rs b/crates/wasmparser/src/readers/core/operators.rs index 6b91282b4f..6b31fdea4c 100644 --- a/crates/wasmparser/src/readers/core/operators.rs +++ b/crates/wasmparser/src/readers/core/operators.rs @@ -467,8 +467,7 @@ pub trait VisitOperator<'a> { for_each_operator!(define_visit_operator); } -/// Trait implemented by types that can visit all [`Operator`] variants. -#[allow(missing_docs)] +/// Trait implemented by types that can visit all [`SimdOperator`] variants. #[cfg(feature = "simd")] pub trait VisitSimdOperator<'a>: VisitOperator<'a> { /// Visits the SIMD [`Operator`] `op` using the given `offset`. From cc8846019b256a68f7c536175bf4345782cc678d Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 19 Nov 2024 17:04:34 +0100 Subject: [PATCH 27/83] allow missing docs again for VisitSimdOperator This is just like with VisitOperator trait. 
--- crates/wasmparser/src/readers/core/operators.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/wasmparser/src/readers/core/operators.rs b/crates/wasmparser/src/readers/core/operators.rs index 6b31fdea4c..c2cbb639e8 100644 --- a/crates/wasmparser/src/readers/core/operators.rs +++ b/crates/wasmparser/src/readers/core/operators.rs @@ -469,6 +469,7 @@ pub trait VisitOperator<'a> { /// Trait implemented by types that can visit all [`SimdOperator`] variants. #[cfg(feature = "simd")] +#[allow(missing_docs)] pub trait VisitSimdOperator<'a>: VisitOperator<'a> { /// Visits the SIMD [`Operator`] `op` using the given `offset`. /// From a7033407a7dab4b6b153c8fa59af66c25fae9da2 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 19 Nov 2024 23:19:33 +0100 Subject: [PATCH 28/83] add docs and example to VisitOperator::simd_visitor --- .../wasmparser/src/readers/core/operators.rs | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/crates/wasmparser/src/readers/core/operators.rs b/crates/wasmparser/src/readers/core/operators.rs index c2cbb639e8..d29a7d0399 100644 --- a/crates/wasmparser/src/readers/core/operators.rs +++ b/crates/wasmparser/src/readers/core/operators.rs @@ -459,6 +459,29 @@ pub trait VisitOperator<'a> { for_each_operator!(visit_operator) } + /// Returns a mutable reference to a [`VisitSimdOperator`] visitor. + /// + /// - If an implementer does _not_ want to support Wasm `simd` proposal + /// nothing has to be done since the default implementation already suffices. + /// - If an implementer _does_ want to support Wasm `simd` proposal this + /// method usually is implemented as `Some(self)` where the implementing + /// type (`Self`) typically also implements `VisitSimdOperator`. 
+ /// + /// # Example + /// + /// ``` + /// impl VisitOperator for MyVisitor { + /// fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator<'a, Output = Self::Output>> { + /// Some(self) + /// } + /// + /// // implement remaining visitation methods here ... + /// } + /// + /// impl VisitSimdOperator for MyVisitor { + /// // implement SIMD visitation methods here ... + /// } + /// ``` #[cfg(feature = "simd")] fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator<'a, Output = Self::Output>> { None From e6614e0d3bff5b6c7b1d7a581622d1ed2cc4ce50 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 19 Nov 2024 23:26:33 +0100 Subject: [PATCH 29/83] move visit_0xfd_operator into separate file We do this to further improve compile times for when the `simd` crate feature is disabled. --- crates/wasmparser/src/binary_reader.rs | 320 +------------------- crates/wasmparser/src/binary_reader/simd.rs | 320 ++++++++++++++++++++ 2 files changed, 323 insertions(+), 317 deletions(-) create mode 100644 crates/wasmparser/src/binary_reader/simd.rs diff --git a/crates/wasmparser/src/binary_reader.rs b/crates/wasmparser/src/binary_reader.rs index 0e4d7f6e6e..1fc11051d2 100644 --- a/crates/wasmparser/src/binary_reader.rs +++ b/crates/wasmparser/src/binary_reader.rs @@ -13,6 +13,9 @@ * limitations under the License. 
*/ +#[cfg(feature = "simd")] +mod simd; + #[cfg(feature = "simd")] use crate::for_each_simd_operator; use crate::prelude::*; @@ -1374,323 +1377,6 @@ impl<'a> BinaryReader<'a> { }) } - #[cfg(feature = "simd")] - fn visit_0xfd_operator( - &mut self, - pos: usize, - visitor: &mut T, - ) -> Result<>::Output> - where - T: VisitSimdOperator<'a>, - { - let code = self.read_var_u32()?; - Ok(match code { - 0x00 => visitor.visit_v128_load(self.read_memarg(4)?), - 0x01 => visitor.visit_v128_load8x8_s(self.read_memarg(3)?), - 0x02 => visitor.visit_v128_load8x8_u(self.read_memarg(3)?), - 0x03 => visitor.visit_v128_load16x4_s(self.read_memarg(3)?), - 0x04 => visitor.visit_v128_load16x4_u(self.read_memarg(3)?), - 0x05 => visitor.visit_v128_load32x2_s(self.read_memarg(3)?), - 0x06 => visitor.visit_v128_load32x2_u(self.read_memarg(3)?), - 0x07 => visitor.visit_v128_load8_splat(self.read_memarg(0)?), - 0x08 => visitor.visit_v128_load16_splat(self.read_memarg(1)?), - 0x09 => visitor.visit_v128_load32_splat(self.read_memarg(2)?), - 0x0a => visitor.visit_v128_load64_splat(self.read_memarg(3)?), - - 0x0b => visitor.visit_v128_store(self.read_memarg(4)?), - 0x0c => visitor.visit_v128_const(self.read_v128()?), - 0x0d => { - let mut lanes: [u8; 16] = [0; 16]; - for lane in &mut lanes { - *lane = self.read_lane_index(32)? 
- } - visitor.visit_i8x16_shuffle(lanes) - } - - 0x0e => visitor.visit_i8x16_swizzle(), - 0x0f => visitor.visit_i8x16_splat(), - 0x10 => visitor.visit_i16x8_splat(), - 0x11 => visitor.visit_i32x4_splat(), - 0x12 => visitor.visit_i64x2_splat(), - 0x13 => visitor.visit_f32x4_splat(), - 0x14 => visitor.visit_f64x2_splat(), - - 0x15 => visitor.visit_i8x16_extract_lane_s(self.read_lane_index(16)?), - 0x16 => visitor.visit_i8x16_extract_lane_u(self.read_lane_index(16)?), - 0x17 => visitor.visit_i8x16_replace_lane(self.read_lane_index(16)?), - 0x18 => visitor.visit_i16x8_extract_lane_s(self.read_lane_index(8)?), - 0x19 => visitor.visit_i16x8_extract_lane_u(self.read_lane_index(8)?), - 0x1a => visitor.visit_i16x8_replace_lane(self.read_lane_index(8)?), - 0x1b => visitor.visit_i32x4_extract_lane(self.read_lane_index(4)?), - - 0x1c => visitor.visit_i32x4_replace_lane(self.read_lane_index(4)?), - 0x1d => visitor.visit_i64x2_extract_lane(self.read_lane_index(2)?), - 0x1e => visitor.visit_i64x2_replace_lane(self.read_lane_index(2)?), - 0x1f => visitor.visit_f32x4_extract_lane(self.read_lane_index(4)?), - 0x20 => visitor.visit_f32x4_replace_lane(self.read_lane_index(4)?), - 0x21 => visitor.visit_f64x2_extract_lane(self.read_lane_index(2)?), - 0x22 => visitor.visit_f64x2_replace_lane(self.read_lane_index(2)?), - - 0x23 => visitor.visit_i8x16_eq(), - 0x24 => visitor.visit_i8x16_ne(), - 0x25 => visitor.visit_i8x16_lt_s(), - 0x26 => visitor.visit_i8x16_lt_u(), - 0x27 => visitor.visit_i8x16_gt_s(), - 0x28 => visitor.visit_i8x16_gt_u(), - 0x29 => visitor.visit_i8x16_le_s(), - 0x2a => visitor.visit_i8x16_le_u(), - 0x2b => visitor.visit_i8x16_ge_s(), - 0x2c => visitor.visit_i8x16_ge_u(), - 0x2d => visitor.visit_i16x8_eq(), - 0x2e => visitor.visit_i16x8_ne(), - 0x2f => visitor.visit_i16x8_lt_s(), - 0x30 => visitor.visit_i16x8_lt_u(), - 0x31 => visitor.visit_i16x8_gt_s(), - 0x32 => visitor.visit_i16x8_gt_u(), - 0x33 => visitor.visit_i16x8_le_s(), - 0x34 => visitor.visit_i16x8_le_u(), - 
0x35 => visitor.visit_i16x8_ge_s(), - 0x36 => visitor.visit_i16x8_ge_u(), - 0x37 => visitor.visit_i32x4_eq(), - 0x38 => visitor.visit_i32x4_ne(), - 0x39 => visitor.visit_i32x4_lt_s(), - 0x3a => visitor.visit_i32x4_lt_u(), - 0x3b => visitor.visit_i32x4_gt_s(), - 0x3c => visitor.visit_i32x4_gt_u(), - 0x3d => visitor.visit_i32x4_le_s(), - 0x3e => visitor.visit_i32x4_le_u(), - 0x3f => visitor.visit_i32x4_ge_s(), - 0x40 => visitor.visit_i32x4_ge_u(), - 0x41 => visitor.visit_f32x4_eq(), - 0x42 => visitor.visit_f32x4_ne(), - 0x43 => visitor.visit_f32x4_lt(), - 0x44 => visitor.visit_f32x4_gt(), - 0x45 => visitor.visit_f32x4_le(), - 0x46 => visitor.visit_f32x4_ge(), - 0x47 => visitor.visit_f64x2_eq(), - 0x48 => visitor.visit_f64x2_ne(), - 0x49 => visitor.visit_f64x2_lt(), - 0x4a => visitor.visit_f64x2_gt(), - 0x4b => visitor.visit_f64x2_le(), - 0x4c => visitor.visit_f64x2_ge(), - 0x4d => visitor.visit_v128_not(), - 0x4e => visitor.visit_v128_and(), - 0x4f => visitor.visit_v128_andnot(), - 0x50 => visitor.visit_v128_or(), - 0x51 => visitor.visit_v128_xor(), - 0x52 => visitor.visit_v128_bitselect(), - 0x53 => visitor.visit_v128_any_true(), - - 0x54 => { - let memarg = self.read_memarg(0)?; - let lane = self.read_lane_index(16)?; - visitor.visit_v128_load8_lane(memarg, lane) - } - 0x55 => { - let memarg = self.read_memarg(1)?; - let lane = self.read_lane_index(8)?; - visitor.visit_v128_load16_lane(memarg, lane) - } - 0x56 => { - let memarg = self.read_memarg(2)?; - let lane = self.read_lane_index(4)?; - visitor.visit_v128_load32_lane(memarg, lane) - } - 0x57 => { - let memarg = self.read_memarg(3)?; - let lane = self.read_lane_index(2)?; - visitor.visit_v128_load64_lane(memarg, lane) - } - 0x58 => { - let memarg = self.read_memarg(0)?; - let lane = self.read_lane_index(16)?; - visitor.visit_v128_store8_lane(memarg, lane) - } - 0x59 => { - let memarg = self.read_memarg(1)?; - let lane = self.read_lane_index(8)?; - visitor.visit_v128_store16_lane(memarg, lane) - } - 0x5a => { - 
let memarg = self.read_memarg(2)?; - let lane = self.read_lane_index(4)?; - visitor.visit_v128_store32_lane(memarg, lane) - } - 0x5b => { - let memarg = self.read_memarg(3)?; - let lane = self.read_lane_index(2)?; - visitor.visit_v128_store64_lane(memarg, lane) - } - - 0x5c => visitor.visit_v128_load32_zero(self.read_memarg(2)?), - 0x5d => visitor.visit_v128_load64_zero(self.read_memarg(3)?), - 0x5e => visitor.visit_f32x4_demote_f64x2_zero(), - 0x5f => visitor.visit_f64x2_promote_low_f32x4(), - 0x60 => visitor.visit_i8x16_abs(), - 0x61 => visitor.visit_i8x16_neg(), - 0x62 => visitor.visit_i8x16_popcnt(), - 0x63 => visitor.visit_i8x16_all_true(), - 0x64 => visitor.visit_i8x16_bitmask(), - 0x65 => visitor.visit_i8x16_narrow_i16x8_s(), - 0x66 => visitor.visit_i8x16_narrow_i16x8_u(), - 0x67 => visitor.visit_f32x4_ceil(), - 0x68 => visitor.visit_f32x4_floor(), - 0x69 => visitor.visit_f32x4_trunc(), - 0x6a => visitor.visit_f32x4_nearest(), - 0x6b => visitor.visit_i8x16_shl(), - 0x6c => visitor.visit_i8x16_shr_s(), - 0x6d => visitor.visit_i8x16_shr_u(), - 0x6e => visitor.visit_i8x16_add(), - 0x6f => visitor.visit_i8x16_add_sat_s(), - 0x70 => visitor.visit_i8x16_add_sat_u(), - 0x71 => visitor.visit_i8x16_sub(), - 0x72 => visitor.visit_i8x16_sub_sat_s(), - 0x73 => visitor.visit_i8x16_sub_sat_u(), - 0x74 => visitor.visit_f64x2_ceil(), - 0x75 => visitor.visit_f64x2_floor(), - 0x76 => visitor.visit_i8x16_min_s(), - 0x77 => visitor.visit_i8x16_min_u(), - 0x78 => visitor.visit_i8x16_max_s(), - 0x79 => visitor.visit_i8x16_max_u(), - 0x7a => visitor.visit_f64x2_trunc(), - 0x7b => visitor.visit_i8x16_avgr_u(), - 0x7c => visitor.visit_i16x8_extadd_pairwise_i8x16_s(), - 0x7d => visitor.visit_i16x8_extadd_pairwise_i8x16_u(), - 0x7e => visitor.visit_i32x4_extadd_pairwise_i16x8_s(), - 0x7f => visitor.visit_i32x4_extadd_pairwise_i16x8_u(), - 0x80 => visitor.visit_i16x8_abs(), - 0x81 => visitor.visit_i16x8_neg(), - 0x82 => visitor.visit_i16x8_q15mulr_sat_s(), - 0x83 => 
visitor.visit_i16x8_all_true(), - 0x84 => visitor.visit_i16x8_bitmask(), - 0x85 => visitor.visit_i16x8_narrow_i32x4_s(), - 0x86 => visitor.visit_i16x8_narrow_i32x4_u(), - 0x87 => visitor.visit_i16x8_extend_low_i8x16_s(), - 0x88 => visitor.visit_i16x8_extend_high_i8x16_s(), - 0x89 => visitor.visit_i16x8_extend_low_i8x16_u(), - 0x8a => visitor.visit_i16x8_extend_high_i8x16_u(), - 0x8b => visitor.visit_i16x8_shl(), - 0x8c => visitor.visit_i16x8_shr_s(), - 0x8d => visitor.visit_i16x8_shr_u(), - 0x8e => visitor.visit_i16x8_add(), - 0x8f => visitor.visit_i16x8_add_sat_s(), - 0x90 => visitor.visit_i16x8_add_sat_u(), - 0x91 => visitor.visit_i16x8_sub(), - 0x92 => visitor.visit_i16x8_sub_sat_s(), - 0x93 => visitor.visit_i16x8_sub_sat_u(), - 0x94 => visitor.visit_f64x2_nearest(), - 0x95 => visitor.visit_i16x8_mul(), - 0x96 => visitor.visit_i16x8_min_s(), - 0x97 => visitor.visit_i16x8_min_u(), - 0x98 => visitor.visit_i16x8_max_s(), - 0x99 => visitor.visit_i16x8_max_u(), - 0x9b => visitor.visit_i16x8_avgr_u(), - 0x9c => visitor.visit_i16x8_extmul_low_i8x16_s(), - 0x9d => visitor.visit_i16x8_extmul_high_i8x16_s(), - 0x9e => visitor.visit_i16x8_extmul_low_i8x16_u(), - 0x9f => visitor.visit_i16x8_extmul_high_i8x16_u(), - 0xa0 => visitor.visit_i32x4_abs(), - 0xa1 => visitor.visit_i32x4_neg(), - 0xa3 => visitor.visit_i32x4_all_true(), - 0xa4 => visitor.visit_i32x4_bitmask(), - 0xa7 => visitor.visit_i32x4_extend_low_i16x8_s(), - 0xa8 => visitor.visit_i32x4_extend_high_i16x8_s(), - 0xa9 => visitor.visit_i32x4_extend_low_i16x8_u(), - 0xaa => visitor.visit_i32x4_extend_high_i16x8_u(), - 0xab => visitor.visit_i32x4_shl(), - 0xac => visitor.visit_i32x4_shr_s(), - 0xad => visitor.visit_i32x4_shr_u(), - 0xae => visitor.visit_i32x4_add(), - 0xb1 => visitor.visit_i32x4_sub(), - 0xb5 => visitor.visit_i32x4_mul(), - 0xb6 => visitor.visit_i32x4_min_s(), - 0xb7 => visitor.visit_i32x4_min_u(), - 0xb8 => visitor.visit_i32x4_max_s(), - 0xb9 => visitor.visit_i32x4_max_u(), - 0xba => 
visitor.visit_i32x4_dot_i16x8_s(), - 0xbc => visitor.visit_i32x4_extmul_low_i16x8_s(), - 0xbd => visitor.visit_i32x4_extmul_high_i16x8_s(), - 0xbe => visitor.visit_i32x4_extmul_low_i16x8_u(), - 0xbf => visitor.visit_i32x4_extmul_high_i16x8_u(), - 0xc0 => visitor.visit_i64x2_abs(), - 0xc1 => visitor.visit_i64x2_neg(), - 0xc3 => visitor.visit_i64x2_all_true(), - 0xc4 => visitor.visit_i64x2_bitmask(), - 0xc7 => visitor.visit_i64x2_extend_low_i32x4_s(), - 0xc8 => visitor.visit_i64x2_extend_high_i32x4_s(), - 0xc9 => visitor.visit_i64x2_extend_low_i32x4_u(), - 0xca => visitor.visit_i64x2_extend_high_i32x4_u(), - 0xcb => visitor.visit_i64x2_shl(), - 0xcc => visitor.visit_i64x2_shr_s(), - 0xcd => visitor.visit_i64x2_shr_u(), - 0xce => visitor.visit_i64x2_add(), - 0xd1 => visitor.visit_i64x2_sub(), - 0xd5 => visitor.visit_i64x2_mul(), - 0xd6 => visitor.visit_i64x2_eq(), - 0xd7 => visitor.visit_i64x2_ne(), - 0xd8 => visitor.visit_i64x2_lt_s(), - 0xd9 => visitor.visit_i64x2_gt_s(), - 0xda => visitor.visit_i64x2_le_s(), - 0xdb => visitor.visit_i64x2_ge_s(), - 0xdc => visitor.visit_i64x2_extmul_low_i32x4_s(), - 0xdd => visitor.visit_i64x2_extmul_high_i32x4_s(), - 0xde => visitor.visit_i64x2_extmul_low_i32x4_u(), - 0xdf => visitor.visit_i64x2_extmul_high_i32x4_u(), - 0xe0 => visitor.visit_f32x4_abs(), - 0xe1 => visitor.visit_f32x4_neg(), - 0xe3 => visitor.visit_f32x4_sqrt(), - 0xe4 => visitor.visit_f32x4_add(), - 0xe5 => visitor.visit_f32x4_sub(), - 0xe6 => visitor.visit_f32x4_mul(), - 0xe7 => visitor.visit_f32x4_div(), - 0xe8 => visitor.visit_f32x4_min(), - 0xe9 => visitor.visit_f32x4_max(), - 0xea => visitor.visit_f32x4_pmin(), - 0xeb => visitor.visit_f32x4_pmax(), - 0xec => visitor.visit_f64x2_abs(), - 0xed => visitor.visit_f64x2_neg(), - 0xef => visitor.visit_f64x2_sqrt(), - 0xf0 => visitor.visit_f64x2_add(), - 0xf1 => visitor.visit_f64x2_sub(), - 0xf2 => visitor.visit_f64x2_mul(), - 0xf3 => visitor.visit_f64x2_div(), - 0xf4 => visitor.visit_f64x2_min(), - 0xf5 => 
visitor.visit_f64x2_max(), - 0xf6 => visitor.visit_f64x2_pmin(), - 0xf7 => visitor.visit_f64x2_pmax(), - 0xf8 => visitor.visit_i32x4_trunc_sat_f32x4_s(), - 0xf9 => visitor.visit_i32x4_trunc_sat_f32x4_u(), - 0xfa => visitor.visit_f32x4_convert_i32x4_s(), - 0xfb => visitor.visit_f32x4_convert_i32x4_u(), - 0xfc => visitor.visit_i32x4_trunc_sat_f64x2_s_zero(), - 0xfd => visitor.visit_i32x4_trunc_sat_f64x2_u_zero(), - 0xfe => visitor.visit_f64x2_convert_low_i32x4_s(), - 0xff => visitor.visit_f64x2_convert_low_i32x4_u(), - 0x100 => visitor.visit_i8x16_relaxed_swizzle(), - 0x101 => visitor.visit_i32x4_relaxed_trunc_f32x4_s(), - 0x102 => visitor.visit_i32x4_relaxed_trunc_f32x4_u(), - 0x103 => visitor.visit_i32x4_relaxed_trunc_f64x2_s_zero(), - 0x104 => visitor.visit_i32x4_relaxed_trunc_f64x2_u_zero(), - 0x105 => visitor.visit_f32x4_relaxed_madd(), - 0x106 => visitor.visit_f32x4_relaxed_nmadd(), - 0x107 => visitor.visit_f64x2_relaxed_madd(), - 0x108 => visitor.visit_f64x2_relaxed_nmadd(), - 0x109 => visitor.visit_i8x16_relaxed_laneselect(), - 0x10a => visitor.visit_i16x8_relaxed_laneselect(), - 0x10b => visitor.visit_i32x4_relaxed_laneselect(), - 0x10c => visitor.visit_i64x2_relaxed_laneselect(), - 0x10d => visitor.visit_f32x4_relaxed_min(), - 0x10e => visitor.visit_f32x4_relaxed_max(), - 0x10f => visitor.visit_f64x2_relaxed_min(), - 0x110 => visitor.visit_f64x2_relaxed_max(), - 0x111 => visitor.visit_i16x8_relaxed_q15mulr_s(), - 0x112 => visitor.visit_i16x8_relaxed_dot_i8x16_i7x16_s(), - 0x113 => visitor.visit_i32x4_relaxed_dot_i8x16_i7x16_add_s(), - - _ => bail!(pos, "unknown 0xfd subopcode: 0x{code:x}"), - }) - } - fn visit_0xfe_operator( &mut self, pos: usize, diff --git a/crates/wasmparser/src/binary_reader/simd.rs b/crates/wasmparser/src/binary_reader/simd.rs new file mode 100644 index 0000000000..35179664d2 --- /dev/null +++ b/crates/wasmparser/src/binary_reader/simd.rs @@ -0,0 +1,320 @@ +use crate::{Result, VisitOperator, VisitSimdOperator}; +use 
super::BinaryReader; + +impl<'a> BinaryReader<'a> { + pub(super) fn visit_0xfd_operator( + &mut self, + pos: usize, + visitor: &mut T, + ) -> Result<>::Output> + where + T: VisitSimdOperator<'a>, + { + let code = self.read_var_u32()?; + Ok(match code { + 0x00 => visitor.visit_v128_load(self.read_memarg(4)?), + 0x01 => visitor.visit_v128_load8x8_s(self.read_memarg(3)?), + 0x02 => visitor.visit_v128_load8x8_u(self.read_memarg(3)?), + 0x03 => visitor.visit_v128_load16x4_s(self.read_memarg(3)?), + 0x04 => visitor.visit_v128_load16x4_u(self.read_memarg(3)?), + 0x05 => visitor.visit_v128_load32x2_s(self.read_memarg(3)?), + 0x06 => visitor.visit_v128_load32x2_u(self.read_memarg(3)?), + 0x07 => visitor.visit_v128_load8_splat(self.read_memarg(0)?), + 0x08 => visitor.visit_v128_load16_splat(self.read_memarg(1)?), + 0x09 => visitor.visit_v128_load32_splat(self.read_memarg(2)?), + 0x0a => visitor.visit_v128_load64_splat(self.read_memarg(3)?), + + 0x0b => visitor.visit_v128_store(self.read_memarg(4)?), + 0x0c => visitor.visit_v128_const(self.read_v128()?), + 0x0d => { + let mut lanes: [u8; 16] = [0; 16]; + for lane in &mut lanes { + *lane = self.read_lane_index(32)? 
+ } + visitor.visit_i8x16_shuffle(lanes) + } + + 0x0e => visitor.visit_i8x16_swizzle(), + 0x0f => visitor.visit_i8x16_splat(), + 0x10 => visitor.visit_i16x8_splat(), + 0x11 => visitor.visit_i32x4_splat(), + 0x12 => visitor.visit_i64x2_splat(), + 0x13 => visitor.visit_f32x4_splat(), + 0x14 => visitor.visit_f64x2_splat(), + + 0x15 => visitor.visit_i8x16_extract_lane_s(self.read_lane_index(16)?), + 0x16 => visitor.visit_i8x16_extract_lane_u(self.read_lane_index(16)?), + 0x17 => visitor.visit_i8x16_replace_lane(self.read_lane_index(16)?), + 0x18 => visitor.visit_i16x8_extract_lane_s(self.read_lane_index(8)?), + 0x19 => visitor.visit_i16x8_extract_lane_u(self.read_lane_index(8)?), + 0x1a => visitor.visit_i16x8_replace_lane(self.read_lane_index(8)?), + 0x1b => visitor.visit_i32x4_extract_lane(self.read_lane_index(4)?), + + 0x1c => visitor.visit_i32x4_replace_lane(self.read_lane_index(4)?), + 0x1d => visitor.visit_i64x2_extract_lane(self.read_lane_index(2)?), + 0x1e => visitor.visit_i64x2_replace_lane(self.read_lane_index(2)?), + 0x1f => visitor.visit_f32x4_extract_lane(self.read_lane_index(4)?), + 0x20 => visitor.visit_f32x4_replace_lane(self.read_lane_index(4)?), + 0x21 => visitor.visit_f64x2_extract_lane(self.read_lane_index(2)?), + 0x22 => visitor.visit_f64x2_replace_lane(self.read_lane_index(2)?), + + 0x23 => visitor.visit_i8x16_eq(), + 0x24 => visitor.visit_i8x16_ne(), + 0x25 => visitor.visit_i8x16_lt_s(), + 0x26 => visitor.visit_i8x16_lt_u(), + 0x27 => visitor.visit_i8x16_gt_s(), + 0x28 => visitor.visit_i8x16_gt_u(), + 0x29 => visitor.visit_i8x16_le_s(), + 0x2a => visitor.visit_i8x16_le_u(), + 0x2b => visitor.visit_i8x16_ge_s(), + 0x2c => visitor.visit_i8x16_ge_u(), + 0x2d => visitor.visit_i16x8_eq(), + 0x2e => visitor.visit_i16x8_ne(), + 0x2f => visitor.visit_i16x8_lt_s(), + 0x30 => visitor.visit_i16x8_lt_u(), + 0x31 => visitor.visit_i16x8_gt_s(), + 0x32 => visitor.visit_i16x8_gt_u(), + 0x33 => visitor.visit_i16x8_le_s(), + 0x34 => visitor.visit_i16x8_le_u(), + 
0x35 => visitor.visit_i16x8_ge_s(), + 0x36 => visitor.visit_i16x8_ge_u(), + 0x37 => visitor.visit_i32x4_eq(), + 0x38 => visitor.visit_i32x4_ne(), + 0x39 => visitor.visit_i32x4_lt_s(), + 0x3a => visitor.visit_i32x4_lt_u(), + 0x3b => visitor.visit_i32x4_gt_s(), + 0x3c => visitor.visit_i32x4_gt_u(), + 0x3d => visitor.visit_i32x4_le_s(), + 0x3e => visitor.visit_i32x4_le_u(), + 0x3f => visitor.visit_i32x4_ge_s(), + 0x40 => visitor.visit_i32x4_ge_u(), + 0x41 => visitor.visit_f32x4_eq(), + 0x42 => visitor.visit_f32x4_ne(), + 0x43 => visitor.visit_f32x4_lt(), + 0x44 => visitor.visit_f32x4_gt(), + 0x45 => visitor.visit_f32x4_le(), + 0x46 => visitor.visit_f32x4_ge(), + 0x47 => visitor.visit_f64x2_eq(), + 0x48 => visitor.visit_f64x2_ne(), + 0x49 => visitor.visit_f64x2_lt(), + 0x4a => visitor.visit_f64x2_gt(), + 0x4b => visitor.visit_f64x2_le(), + 0x4c => visitor.visit_f64x2_ge(), + 0x4d => visitor.visit_v128_not(), + 0x4e => visitor.visit_v128_and(), + 0x4f => visitor.visit_v128_andnot(), + 0x50 => visitor.visit_v128_or(), + 0x51 => visitor.visit_v128_xor(), + 0x52 => visitor.visit_v128_bitselect(), + 0x53 => visitor.visit_v128_any_true(), + + 0x54 => { + let memarg = self.read_memarg(0)?; + let lane = self.read_lane_index(16)?; + visitor.visit_v128_load8_lane(memarg, lane) + } + 0x55 => { + let memarg = self.read_memarg(1)?; + let lane = self.read_lane_index(8)?; + visitor.visit_v128_load16_lane(memarg, lane) + } + 0x56 => { + let memarg = self.read_memarg(2)?; + let lane = self.read_lane_index(4)?; + visitor.visit_v128_load32_lane(memarg, lane) + } + 0x57 => { + let memarg = self.read_memarg(3)?; + let lane = self.read_lane_index(2)?; + visitor.visit_v128_load64_lane(memarg, lane) + } + 0x58 => { + let memarg = self.read_memarg(0)?; + let lane = self.read_lane_index(16)?; + visitor.visit_v128_store8_lane(memarg, lane) + } + 0x59 => { + let memarg = self.read_memarg(1)?; + let lane = self.read_lane_index(8)?; + visitor.visit_v128_store16_lane(memarg, lane) + } + 0x5a => { + 
let memarg = self.read_memarg(2)?; + let lane = self.read_lane_index(4)?; + visitor.visit_v128_store32_lane(memarg, lane) + } + 0x5b => { + let memarg = self.read_memarg(3)?; + let lane = self.read_lane_index(2)?; + visitor.visit_v128_store64_lane(memarg, lane) + } + + 0x5c => visitor.visit_v128_load32_zero(self.read_memarg(2)?), + 0x5d => visitor.visit_v128_load64_zero(self.read_memarg(3)?), + 0x5e => visitor.visit_f32x4_demote_f64x2_zero(), + 0x5f => visitor.visit_f64x2_promote_low_f32x4(), + 0x60 => visitor.visit_i8x16_abs(), + 0x61 => visitor.visit_i8x16_neg(), + 0x62 => visitor.visit_i8x16_popcnt(), + 0x63 => visitor.visit_i8x16_all_true(), + 0x64 => visitor.visit_i8x16_bitmask(), + 0x65 => visitor.visit_i8x16_narrow_i16x8_s(), + 0x66 => visitor.visit_i8x16_narrow_i16x8_u(), + 0x67 => visitor.visit_f32x4_ceil(), + 0x68 => visitor.visit_f32x4_floor(), + 0x69 => visitor.visit_f32x4_trunc(), + 0x6a => visitor.visit_f32x4_nearest(), + 0x6b => visitor.visit_i8x16_shl(), + 0x6c => visitor.visit_i8x16_shr_s(), + 0x6d => visitor.visit_i8x16_shr_u(), + 0x6e => visitor.visit_i8x16_add(), + 0x6f => visitor.visit_i8x16_add_sat_s(), + 0x70 => visitor.visit_i8x16_add_sat_u(), + 0x71 => visitor.visit_i8x16_sub(), + 0x72 => visitor.visit_i8x16_sub_sat_s(), + 0x73 => visitor.visit_i8x16_sub_sat_u(), + 0x74 => visitor.visit_f64x2_ceil(), + 0x75 => visitor.visit_f64x2_floor(), + 0x76 => visitor.visit_i8x16_min_s(), + 0x77 => visitor.visit_i8x16_min_u(), + 0x78 => visitor.visit_i8x16_max_s(), + 0x79 => visitor.visit_i8x16_max_u(), + 0x7a => visitor.visit_f64x2_trunc(), + 0x7b => visitor.visit_i8x16_avgr_u(), + 0x7c => visitor.visit_i16x8_extadd_pairwise_i8x16_s(), + 0x7d => visitor.visit_i16x8_extadd_pairwise_i8x16_u(), + 0x7e => visitor.visit_i32x4_extadd_pairwise_i16x8_s(), + 0x7f => visitor.visit_i32x4_extadd_pairwise_i16x8_u(), + 0x80 => visitor.visit_i16x8_abs(), + 0x81 => visitor.visit_i16x8_neg(), + 0x82 => visitor.visit_i16x8_q15mulr_sat_s(), + 0x83 => 
visitor.visit_i16x8_all_true(), + 0x84 => visitor.visit_i16x8_bitmask(), + 0x85 => visitor.visit_i16x8_narrow_i32x4_s(), + 0x86 => visitor.visit_i16x8_narrow_i32x4_u(), + 0x87 => visitor.visit_i16x8_extend_low_i8x16_s(), + 0x88 => visitor.visit_i16x8_extend_high_i8x16_s(), + 0x89 => visitor.visit_i16x8_extend_low_i8x16_u(), + 0x8a => visitor.visit_i16x8_extend_high_i8x16_u(), + 0x8b => visitor.visit_i16x8_shl(), + 0x8c => visitor.visit_i16x8_shr_s(), + 0x8d => visitor.visit_i16x8_shr_u(), + 0x8e => visitor.visit_i16x8_add(), + 0x8f => visitor.visit_i16x8_add_sat_s(), + 0x90 => visitor.visit_i16x8_add_sat_u(), + 0x91 => visitor.visit_i16x8_sub(), + 0x92 => visitor.visit_i16x8_sub_sat_s(), + 0x93 => visitor.visit_i16x8_sub_sat_u(), + 0x94 => visitor.visit_f64x2_nearest(), + 0x95 => visitor.visit_i16x8_mul(), + 0x96 => visitor.visit_i16x8_min_s(), + 0x97 => visitor.visit_i16x8_min_u(), + 0x98 => visitor.visit_i16x8_max_s(), + 0x99 => visitor.visit_i16x8_max_u(), + 0x9b => visitor.visit_i16x8_avgr_u(), + 0x9c => visitor.visit_i16x8_extmul_low_i8x16_s(), + 0x9d => visitor.visit_i16x8_extmul_high_i8x16_s(), + 0x9e => visitor.visit_i16x8_extmul_low_i8x16_u(), + 0x9f => visitor.visit_i16x8_extmul_high_i8x16_u(), + 0xa0 => visitor.visit_i32x4_abs(), + 0xa1 => visitor.visit_i32x4_neg(), + 0xa3 => visitor.visit_i32x4_all_true(), + 0xa4 => visitor.visit_i32x4_bitmask(), + 0xa7 => visitor.visit_i32x4_extend_low_i16x8_s(), + 0xa8 => visitor.visit_i32x4_extend_high_i16x8_s(), + 0xa9 => visitor.visit_i32x4_extend_low_i16x8_u(), + 0xaa => visitor.visit_i32x4_extend_high_i16x8_u(), + 0xab => visitor.visit_i32x4_shl(), + 0xac => visitor.visit_i32x4_shr_s(), + 0xad => visitor.visit_i32x4_shr_u(), + 0xae => visitor.visit_i32x4_add(), + 0xb1 => visitor.visit_i32x4_sub(), + 0xb5 => visitor.visit_i32x4_mul(), + 0xb6 => visitor.visit_i32x4_min_s(), + 0xb7 => visitor.visit_i32x4_min_u(), + 0xb8 => visitor.visit_i32x4_max_s(), + 0xb9 => visitor.visit_i32x4_max_u(), + 0xba => 
visitor.visit_i32x4_dot_i16x8_s(), + 0xbc => visitor.visit_i32x4_extmul_low_i16x8_s(), + 0xbd => visitor.visit_i32x4_extmul_high_i16x8_s(), + 0xbe => visitor.visit_i32x4_extmul_low_i16x8_u(), + 0xbf => visitor.visit_i32x4_extmul_high_i16x8_u(), + 0xc0 => visitor.visit_i64x2_abs(), + 0xc1 => visitor.visit_i64x2_neg(), + 0xc3 => visitor.visit_i64x2_all_true(), + 0xc4 => visitor.visit_i64x2_bitmask(), + 0xc7 => visitor.visit_i64x2_extend_low_i32x4_s(), + 0xc8 => visitor.visit_i64x2_extend_high_i32x4_s(), + 0xc9 => visitor.visit_i64x2_extend_low_i32x4_u(), + 0xca => visitor.visit_i64x2_extend_high_i32x4_u(), + 0xcb => visitor.visit_i64x2_shl(), + 0xcc => visitor.visit_i64x2_shr_s(), + 0xcd => visitor.visit_i64x2_shr_u(), + 0xce => visitor.visit_i64x2_add(), + 0xd1 => visitor.visit_i64x2_sub(), + 0xd5 => visitor.visit_i64x2_mul(), + 0xd6 => visitor.visit_i64x2_eq(), + 0xd7 => visitor.visit_i64x2_ne(), + 0xd8 => visitor.visit_i64x2_lt_s(), + 0xd9 => visitor.visit_i64x2_gt_s(), + 0xda => visitor.visit_i64x2_le_s(), + 0xdb => visitor.visit_i64x2_ge_s(), + 0xdc => visitor.visit_i64x2_extmul_low_i32x4_s(), + 0xdd => visitor.visit_i64x2_extmul_high_i32x4_s(), + 0xde => visitor.visit_i64x2_extmul_low_i32x4_u(), + 0xdf => visitor.visit_i64x2_extmul_high_i32x4_u(), + 0xe0 => visitor.visit_f32x4_abs(), + 0xe1 => visitor.visit_f32x4_neg(), + 0xe3 => visitor.visit_f32x4_sqrt(), + 0xe4 => visitor.visit_f32x4_add(), + 0xe5 => visitor.visit_f32x4_sub(), + 0xe6 => visitor.visit_f32x4_mul(), + 0xe7 => visitor.visit_f32x4_div(), + 0xe8 => visitor.visit_f32x4_min(), + 0xe9 => visitor.visit_f32x4_max(), + 0xea => visitor.visit_f32x4_pmin(), + 0xeb => visitor.visit_f32x4_pmax(), + 0xec => visitor.visit_f64x2_abs(), + 0xed => visitor.visit_f64x2_neg(), + 0xef => visitor.visit_f64x2_sqrt(), + 0xf0 => visitor.visit_f64x2_add(), + 0xf1 => visitor.visit_f64x2_sub(), + 0xf2 => visitor.visit_f64x2_mul(), + 0xf3 => visitor.visit_f64x2_div(), + 0xf4 => visitor.visit_f64x2_min(), + 0xf5 => 
visitor.visit_f64x2_max(), + 0xf6 => visitor.visit_f64x2_pmin(), + 0xf7 => visitor.visit_f64x2_pmax(), + 0xf8 => visitor.visit_i32x4_trunc_sat_f32x4_s(), + 0xf9 => visitor.visit_i32x4_trunc_sat_f32x4_u(), + 0xfa => visitor.visit_f32x4_convert_i32x4_s(), + 0xfb => visitor.visit_f32x4_convert_i32x4_u(), + 0xfc => visitor.visit_i32x4_trunc_sat_f64x2_s_zero(), + 0xfd => visitor.visit_i32x4_trunc_sat_f64x2_u_zero(), + 0xfe => visitor.visit_f64x2_convert_low_i32x4_s(), + 0xff => visitor.visit_f64x2_convert_low_i32x4_u(), + 0x100 => visitor.visit_i8x16_relaxed_swizzle(), + 0x101 => visitor.visit_i32x4_relaxed_trunc_f32x4_s(), + 0x102 => visitor.visit_i32x4_relaxed_trunc_f32x4_u(), + 0x103 => visitor.visit_i32x4_relaxed_trunc_f64x2_s_zero(), + 0x104 => visitor.visit_i32x4_relaxed_trunc_f64x2_u_zero(), + 0x105 => visitor.visit_f32x4_relaxed_madd(), + 0x106 => visitor.visit_f32x4_relaxed_nmadd(), + 0x107 => visitor.visit_f64x2_relaxed_madd(), + 0x108 => visitor.visit_f64x2_relaxed_nmadd(), + 0x109 => visitor.visit_i8x16_relaxed_laneselect(), + 0x10a => visitor.visit_i16x8_relaxed_laneselect(), + 0x10b => visitor.visit_i32x4_relaxed_laneselect(), + 0x10c => visitor.visit_i64x2_relaxed_laneselect(), + 0x10d => visitor.visit_f32x4_relaxed_min(), + 0x10e => visitor.visit_f32x4_relaxed_max(), + 0x10f => visitor.visit_f64x2_relaxed_min(), + 0x110 => visitor.visit_f64x2_relaxed_max(), + 0x111 => visitor.visit_i16x8_relaxed_q15mulr_s(), + 0x112 => visitor.visit_i16x8_relaxed_dot_i8x16_i7x16_s(), + 0x113 => visitor.visit_i32x4_relaxed_dot_i8x16_i7x16_add_s(), + + _ => bail!(pos, "unknown 0xfd subopcode: 0x{code:x}"), + }) + } +} \ No newline at end of file From 3c9559c4d47869828b73b0209e9c5600cf9dd280 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 19 Nov 2024 23:26:50 +0100 Subject: [PATCH 30/83] apply rustfmt --- crates/wasmparser/src/binary_reader/simd.rs | 4 ++-- crates/wasmparser/src/readers/core/operators.rs | 10 +++++----- 2 files changed, 7 insertions(+), 7 
deletions(-) diff --git a/crates/wasmparser/src/binary_reader/simd.rs b/crates/wasmparser/src/binary_reader/simd.rs index 35179664d2..7d5854c582 100644 --- a/crates/wasmparser/src/binary_reader/simd.rs +++ b/crates/wasmparser/src/binary_reader/simd.rs @@ -1,5 +1,5 @@ -use crate::{Result, VisitOperator, VisitSimdOperator}; use super::BinaryReader; +use crate::{Result, VisitOperator, VisitSimdOperator}; impl<'a> BinaryReader<'a> { pub(super) fn visit_0xfd_operator( @@ -317,4 +317,4 @@ impl<'a> BinaryReader<'a> { _ => bail!(pos, "unknown 0xfd subopcode: 0x{code:x}"), }) } -} \ No newline at end of file +} diff --git a/crates/wasmparser/src/readers/core/operators.rs b/crates/wasmparser/src/readers/core/operators.rs index d29a7d0399..d774eebb81 100644 --- a/crates/wasmparser/src/readers/core/operators.rs +++ b/crates/wasmparser/src/readers/core/operators.rs @@ -460,24 +460,24 @@ pub trait VisitOperator<'a> { } /// Returns a mutable reference to a [`VisitSimdOperator`] visitor. - /// + /// /// - If an implementer does _not_ want to support Wasm `simd` proposal /// nothing has to be done since the default implementation already suffices. /// - If an implementer _does_ want to support Wasm `simd` proposal this /// method usually is implemented as `Some(self)` where the implementing /// type (`Self`) typically also implements `VisitSimdOperator`. - /// + /// /// # Example - /// + /// /// ``` /// impl VisitOperator for MyVisitor { /// fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator<'a, Output = Self::Output>> { /// Some(self) /// } - /// + /// /// // implement remaining visitation methods here ... /// } - /// + /// /// impl VisitSimdOperator for MyVisitor { /// // implement SIMD visitation methods here ... 
/// } From a8f67fe4a5a1bcf545c5994cc133129680f8ea13 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 19 Nov 2024 23:33:34 +0100 Subject: [PATCH 31/83] wasmprinter: fix remaining simd feature toggles --- crates/wasmprinter/src/operator.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/crates/wasmprinter/src/operator.rs b/crates/wasmprinter/src/operator.rs index 63f099f231..3767aa54dc 100644 --- a/crates/wasmprinter/src/operator.rs +++ b/crates/wasmprinter/src/operator.rs @@ -4,8 +4,10 @@ use termcolor::{Ansi, NoColor}; use wasmparser::{ BinaryReader, BlockType, BrTable, Catch, CompositeInnerType, ContType, FrameKind, FuncType, Handle, MemArg, ModuleArity, Operator, Ordering, RefType, ResumeTable, SubType, TryTable, - VisitOperator, VisitSimdOperator, + VisitOperator, }; +#[cfg(feature = "simd")] +use wasmparser::VisitSimdOperator; pub struct OperatorState { op_offset: usize, @@ -385,11 +387,13 @@ impl<'printer, 'state, 'a, 'b> PrintOperator<'printer, 'state, 'a, 'b> { self.printer.print_idx(&self.state.core.element_names, idx) } + #[cfg(feature = "simd")] fn lane(&mut self, lane: u8) -> Result<()> { write!(self.result(), " {lane}")?; Ok(()) } + #[cfg(feature = "simd")] fn lanes(&mut self, lanes: [u8; 16]) -> Result<()> { for lane in lanes.iter() { write!(self.result(), " {lane}")?; From c9204e6c1952626ebdeffddb2101afbf2884dea3 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 19 Nov 2024 23:33:49 +0100 Subject: [PATCH 32/83] apply rustfmt --- crates/wasmprinter/src/operator.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/wasmprinter/src/operator.rs b/crates/wasmprinter/src/operator.rs index 3767aa54dc..7b3ea3f9e0 100644 --- a/crates/wasmprinter/src/operator.rs +++ b/crates/wasmprinter/src/operator.rs @@ -1,13 +1,13 @@ use super::{Config, Print, PrintTermcolor, Printer, State}; use anyhow::{anyhow, bail, Result}; use termcolor::{Ansi, NoColor}; +#[cfg(feature = "simd")] +use 
wasmparser::VisitSimdOperator; use wasmparser::{ BinaryReader, BlockType, BrTable, Catch, CompositeInnerType, ContType, FrameKind, FuncType, Handle, MemArg, ModuleArity, Operator, Ordering, RefType, ResumeTable, SubType, TryTable, VisitOperator, }; -#[cfg(feature = "simd")] -use wasmparser::VisitSimdOperator; pub struct OperatorState { op_offset: usize, From 27551806a181a25df3f30c9c31015753a9a8d337 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 19 Nov 2024 23:43:42 +0100 Subject: [PATCH 33/83] fix wasmparser benchmarks --- crates/wasmparser/benches/benchmark.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/crates/wasmparser/benches/benchmark.rs b/crates/wasmparser/benches/benchmark.rs index 2b0a3b5ef9..af2bc7d84f 100644 --- a/crates/wasmparser/benches/benchmark.rs +++ b/crates/wasmparser/benches/benchmark.rs @@ -369,8 +369,6 @@ impl<'a> VisitOperator<'a> for NopVisit { } #[allow(unused_variables)] -impl<'a> VisitSimdOperator for NopVisit { - type Output = (); - +impl<'a> VisitSimdOperator<'a> for NopVisit { wasmparser::for_each_simd_operator!(define_visit_operator); } From accb1137c92c3fd2242e63fbf82e6fbf3fa9217d Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Wed, 20 Nov 2024 00:07:48 +0100 Subject: [PATCH 34/83] wasm-encoder: add simd support --- crates/wasm-encoder/Cargo.toml | 4 +- crates/wasm-encoder/src/reencode.rs | 173 ++++++++++++++++------------ 2 files changed, 104 insertions(+), 73 deletions(-) diff --git a/crates/wasm-encoder/Cargo.toml b/crates/wasm-encoder/Cargo.toml index f547e9ae19..e5ce3b6141 100644 --- a/crates/wasm-encoder/Cargo.toml +++ b/crates/wasm-encoder/Cargo.toml @@ -33,8 +33,10 @@ wasmparser = { path = "../wasmparser" } wasmprinter = { workspace = true } [features] -default = ['component-model'] +default = ['component-model', 'simd'] # On-by-default: conditional support for emitting components in addition to # core modules. 
component-model = ['wasmparser?/component-model'] +# On-by-default: conditional support for emitting SIMD wasm operators. +simd = ['wasmparser?/simd'] diff --git a/crates/wasm-encoder/src/reencode.rs b/crates/wasm-encoder/src/reencode.rs index bba2f9c2bf..8d31a29a57 100644 --- a/crates/wasm-encoder/src/reencode.rs +++ b/crates/wasm-encoder/src/reencode.rs @@ -603,6 +603,7 @@ pub mod utils { use super::{Error, Reencode}; use crate::{CoreTypeEncoder, Encode}; use std::ops::Range; + use crate::Instruction; pub fn parse_core_module( reencoder: &mut T, @@ -1546,98 +1547,126 @@ pub mod utils { } } + macro_rules! translate_map { + // This case is used to map, based on the name of the field, from the + // wasmparser payload type to the wasm-encoder payload type through + // `Translator` as applicable. + ($reencoder:ident $arg:ident tag_index) => ($reencoder.tag_index($arg)); + ($reencoder:ident $arg:ident function_index) => ($reencoder.function_index($arg)); + ($reencoder:ident $arg:ident table) => ($reencoder.table_index($arg)); + ($reencoder:ident $arg:ident table_index) => ($reencoder.table_index($arg)); + ($reencoder:ident $arg:ident dst_table) => ($reencoder.table_index($arg)); + ($reencoder:ident $arg:ident src_table) => ($reencoder.table_index($arg)); + ($reencoder:ident $arg:ident type_index) => ($reencoder.type_index($arg)); + ($reencoder:ident $arg:ident array_type_index) => ($reencoder.type_index($arg)); + ($reencoder:ident $arg:ident array_type_index_dst) => ($reencoder.type_index($arg)); + ($reencoder:ident $arg:ident array_type_index_src) => ($reencoder.type_index($arg)); + ($reencoder:ident $arg:ident struct_type_index) => ($reencoder.type_index($arg)); + ($reencoder:ident $arg:ident global_index) => ($reencoder.global_index($arg)); + ($reencoder:ident $arg:ident mem) => ($reencoder.memory_index($arg)); + ($reencoder:ident $arg:ident src_mem) => ($reencoder.memory_index($arg)); + ($reencoder:ident $arg:ident dst_mem) => ($reencoder.memory_index($arg)); + 
($reencoder:ident $arg:ident data_index) => ($reencoder.data_index($arg)); + ($reencoder:ident $arg:ident elem_index) => ($reencoder.element_index($arg)); + ($reencoder:ident $arg:ident array_data_index) => ($reencoder.data_index($arg)); + ($reencoder:ident $arg:ident array_elem_index) => ($reencoder.element_index($arg)); + ($reencoder:ident $arg:ident blockty) => ($reencoder.block_type($arg)?); + ($reencoder:ident $arg:ident relative_depth) => ($arg); + ($reencoder:ident $arg:ident targets) => (( + $arg + .targets() + .collect::, wasmparser::BinaryReaderError>>()? + .into(), + $arg.default(), + )); + ($reencoder:ident $arg:ident ty) => ($reencoder.val_type($arg)?); + ($reencoder:ident $arg:ident hty) => ($reencoder.heap_type($arg)?); + ($reencoder:ident $arg:ident from_ref_type) => ($reencoder.ref_type($arg)?); + ($reencoder:ident $arg:ident to_ref_type) => ($reencoder.ref_type($arg)?); + ($reencoder:ident $arg:ident memarg) => ($reencoder.mem_arg($arg)); + ($reencoder:ident $arg:ident ordering) => ($reencoder.ordering($arg)); + ($reencoder:ident $arg:ident local_index) => ($arg); + ($reencoder:ident $arg:ident value) => ($arg); + ($reencoder:ident $arg:ident lane) => ($arg); + ($reencoder:ident $arg:ident lanes) => ($arg); + ($reencoder:ident $arg:ident array_size) => ($arg); + ($reencoder:ident $arg:ident field_index) => ($arg); + ($reencoder:ident $arg:ident try_table) => ($arg); + ($reencoder:ident $arg:ident argument_index) => ($reencoder.type_index($arg)); + ($reencoder:ident $arg:ident result_index) => ($reencoder.type_index($arg)); + ($reencoder:ident $arg:ident cont_type_index) => ($reencoder.type_index($arg)); + ($reencoder:ident $arg:ident resume_table) => (( + $arg.handlers.into_iter().map(|h| $reencoder.handle(h)).collect::>().into() + )); + } + + macro_rules! translate_build { + // This case takes the arguments of a wasmparser instruction and creates + // a wasm-encoder instruction. 
There are a few special cases for where + // the structure of a wasmparser instruction differs from that of + // wasm-encoder. + ($reencoder:ident $op:ident) => (Instruction::$op); + ($reencoder:ident BrTable $arg:ident) => (Instruction::BrTable($arg.0, $arg.1)); + ($reencoder:ident I32Const $arg:ident) => (Instruction::I32Const($arg)); + ($reencoder:ident I64Const $arg:ident) => (Instruction::I64Const($arg)); + ($reencoder:ident F32Const $arg:ident) => (Instruction::F32Const(f32::from_bits($arg.bits()))); + ($reencoder:ident F64Const $arg:ident) => (Instruction::F64Const(f64::from_bits($arg.bits()))); + ($reencoder:ident V128Const $arg:ident) => (Instruction::V128Const($arg.i128())); + ($reencoder:ident TryTable $table:ident) => (Instruction::TryTable($reencoder.block_type($table.ty)?, { + $table.catches.into_iter().map(|c| $reencoder.catch(c)).collect::>().into() + })); + ($reencoder:ident $op:ident $arg:ident) => (Instruction::$op($arg)); + ($reencoder:ident $op:ident $($arg:ident)*) => (Instruction::$op { $($arg),* }); + } + pub fn instruction<'a, T: ?Sized + Reencode>( reencoder: &mut T, arg: wasmparser::Operator<'a>, ) -> Result, Error> { - use crate::Instruction; - macro_rules! translate { ($( @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { Ok(match arg { $( wasmparser::Operator::$op $({ $($arg),* })? => { $( - $(let $arg = translate!(map $arg $arg);)* + $(let $arg = translate_map!(reencoder $arg $arg);)* )? - translate!(build $op $($($arg)*)?) + translate_build!(reencoder $op $($($arg)*)?) } )* + #[cfg(feature = "simd")] + wasmparser::Operator::Simd(simd_arg) => simd_instruction(reencoder, simd_arg)?, + unexpected => unreachable!("encountered unexpected Wasm operator: {unexpected:?}"), }) }; - - // This case is used to map, based on the name of the field, from the - // wasmparser payload type to the wasm-encoder payload type through - // `Translator` as applicable. 
- (map $arg:ident tag_index) => (reencoder.tag_index($arg)); - (map $arg:ident function_index) => (reencoder.function_index($arg)); - (map $arg:ident table) => (reencoder.table_index($arg)); - (map $arg:ident table_index) => (reencoder.table_index($arg)); - (map $arg:ident dst_table) => (reencoder.table_index($arg)); - (map $arg:ident src_table) => (reencoder.table_index($arg)); - (map $arg:ident type_index) => (reencoder.type_index($arg)); - (map $arg:ident array_type_index) => (reencoder.type_index($arg)); - (map $arg:ident array_type_index_dst) => (reencoder.type_index($arg)); - (map $arg:ident array_type_index_src) => (reencoder.type_index($arg)); - (map $arg:ident struct_type_index) => (reencoder.type_index($arg)); - (map $arg:ident global_index) => (reencoder.global_index($arg)); - (map $arg:ident mem) => (reencoder.memory_index($arg)); - (map $arg:ident src_mem) => (reencoder.memory_index($arg)); - (map $arg:ident dst_mem) => (reencoder.memory_index($arg)); - (map $arg:ident data_index) => (reencoder.data_index($arg)); - (map $arg:ident elem_index) => (reencoder.element_index($arg)); - (map $arg:ident array_data_index) => (reencoder.data_index($arg)); - (map $arg:ident array_elem_index) => (reencoder.element_index($arg)); - (map $arg:ident blockty) => (reencoder.block_type($arg)?); - (map $arg:ident relative_depth) => ($arg); - (map $arg:ident targets) => (( - $arg - .targets() - .collect::, wasmparser::BinaryReaderError>>()? 
- .into(), - $arg.default(), - )); - (map $arg:ident ty) => (reencoder.val_type($arg)?); - (map $arg:ident hty) => (reencoder.heap_type($arg)?); - (map $arg:ident from_ref_type) => (reencoder.ref_type($arg)?); - (map $arg:ident to_ref_type) => (reencoder.ref_type($arg)?); - (map $arg:ident memarg) => (reencoder.mem_arg($arg)); - (map $arg:ident ordering) => (reencoder.ordering($arg)); - (map $arg:ident local_index) => ($arg); - (map $arg:ident value) => ($arg); - (map $arg:ident lane) => ($arg); - (map $arg:ident lanes) => ($arg); - (map $arg:ident array_size) => ($arg); - (map $arg:ident field_index) => ($arg); - (map $arg:ident try_table) => ($arg); - (map $arg:ident argument_index) => (reencoder.type_index($arg)); - (map $arg:ident result_index) => (reencoder.type_index($arg)); - (map $arg:ident cont_type_index) => (reencoder.type_index($arg)); - (map $arg:ident resume_table) => (( - $arg.handlers.into_iter().map(|h| reencoder.handle(h)).collect::>().into() - )); - - // This case takes the arguments of a wasmparser instruction and creates - // a wasm-encoder instruction. There are a few special cases for where - // the structure of a wasmparser instruction differs from that of - // wasm-encoder. 
- (build $op:ident) => (Instruction::$op); - (build BrTable $arg:ident) => (Instruction::BrTable($arg.0, $arg.1)); - (build I32Const $arg:ident) => (Instruction::I32Const($arg)); - (build I64Const $arg:ident) => (Instruction::I64Const($arg)); - (build F32Const $arg:ident) => (Instruction::F32Const(f32::from_bits($arg.bits()))); - (build F64Const $arg:ident) => (Instruction::F64Const(f64::from_bits($arg.bits()))); - (build V128Const $arg:ident) => (Instruction::V128Const($arg.i128())); - (build TryTable $table:ident) => (Instruction::TryTable(reencoder.block_type($table.ty)?, { - $table.catches.into_iter().map(|c| reencoder.catch(c)).collect::>().into() - })); - (build $op:ident $arg:ident) => (Instruction::$op($arg)); - (build $op:ident $($arg:ident)*) => (Instruction::$op { $($arg),* }); } wasmparser::for_each_operator!(translate) } + #[cfg(feature = "simd")] + fn simd_instruction<'a, T: ?Sized + Reencode>( + reencoder: &mut T, + arg: wasmparser::SimdOperator, + ) -> Result, Error> { + macro_rules! translate_simd { + ($( @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { + Ok(match arg { + $( + wasmparser::SimdOperator::$op $({ $($arg),* })? => { + $( + $(let $arg = translate_map!(reencoder $arg $arg);)* + )? + translate_build!(reencoder $op $($($arg)*)?) + } + )* + }) + }; + } + + wasmparser::for_each_simd_operator!(translate_simd) + } + /// Parses the input `section` given from the `wasmparser` crate and adds /// all the code to the `code` section. 
pub fn parse_code_section( From c87a7918442f9cc87dccec35b8c2aee813727e5e Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Wed, 20 Nov 2024 00:08:34 +0100 Subject: [PATCH 35/83] apply rustfmt and avoid formatting some parts --- crates/wasm-encoder/src/reencode.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/wasm-encoder/src/reencode.rs b/crates/wasm-encoder/src/reencode.rs index 8d31a29a57..30972eefbf 100644 --- a/crates/wasm-encoder/src/reencode.rs +++ b/crates/wasm-encoder/src/reencode.rs @@ -601,9 +601,9 @@ impl Reencode for RoundtripReencoder { #[allow(missing_docs)] // FIXME pub mod utils { use super::{Error, Reencode}; + use crate::Instruction; use crate::{CoreTypeEncoder, Encode}; use std::ops::Range; - use crate::Instruction; pub fn parse_core_module( reencoder: &mut T, @@ -1547,6 +1547,7 @@ pub mod utils { } } + #[rustfmt::skip] macro_rules! translate_map { // This case is used to map, based on the name of the field, from the // wasmparser payload type to the wasm-encoder payload type through @@ -1600,6 +1601,7 @@ pub mod utils { )); } + #[rustfmt::skip] macro_rules! translate_build { // This case takes the arguments of a wasmparser instruction and creates // a wasm-encoder instruction. 
There are a few special cases for where From 7b0a04be982df6a2e41f2b047837366d591ff191 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Wed, 20 Nov 2024 00:26:06 +0100 Subject: [PATCH 36/83] wasm-mutate: fix compile errors --- crates/wasm-mutate/Cargo.toml | 4 +- .../src/mutators/modify_const_exprs.rs | 8 +- .../wasm-mutate/src/mutators/peephole/dfg.rs | 891 +++++++++--------- 3 files changed, 453 insertions(+), 450 deletions(-) diff --git a/crates/wasm-mutate/Cargo.toml b/crates/wasm-mutate/Cargo.toml index 6f763af313..a280018e63 100644 --- a/crates/wasm-mutate/Cargo.toml +++ b/crates/wasm-mutate/Cargo.toml @@ -13,8 +13,8 @@ workspace = true [dependencies] clap = { workspace = true, optional = true } thiserror = "1.0.28" -wasmparser = { workspace = true } -wasm-encoder = { workspace = true, features = ["wasmparser"] } +wasmparser = { workspace = true, features = ["simd"]} +wasm-encoder = { workspace = true, features = ["wasmparser", "simd"] } rand = { workspace = true } log = { workspace = true } egg = "0.6.0" diff --git a/crates/wasm-mutate/src/mutators/modify_const_exprs.rs b/crates/wasm-mutate/src/mutators/modify_const_exprs.rs index 8051e73adb..9500446150 100644 --- a/crates/wasm-mutate/src/mutators/modify_const_exprs.rs +++ b/crates/wasm-mutate/src/mutators/modify_const_exprs.rs @@ -5,7 +5,7 @@ use crate::{Error, Mutator, ReencodeResult}; use rand::Rng; use wasm_encoder::reencode::{self, Reencode, RoundtripReencoder}; use wasm_encoder::{ElementSection, GlobalSection}; -use wasmparser::{ConstExpr, ElementSectionReader, GlobalSectionReader}; +use wasmparser::{ConstExpr, ElementSectionReader, GlobalSectionReader, SimdOperator}; #[derive(PartialEq, Copy, Clone)] pub enum ConstExpressionMutator { @@ -76,7 +76,7 @@ impl<'cfg, 'wasm> Reencode for InitTranslator<'cfg, 'wasm> { O::RefNull { .. 
} | O::I32Const { value: 0 | 1 } | O::I64Const { value: 0 | 1 } => true, O::F32Const { value } => value.bits() == 0, O::F64Const { value } => value.bits() == 0, - O::V128Const { value } => value.i128() == 0, + O::Simd(SimdOperator::V128Const { value }) => value.i128() == 0, _ => false, }; if is_simplest { @@ -86,7 +86,7 @@ impl<'cfg, 'wasm> Reencode for InitTranslator<'cfg, 'wasm> { let ty = match op { O::I32Const { .. } => T::I32, O::I64Const { .. } => T::I64, - O::V128Const { .. } => T::V128, + O::Simd(SimdOperator::V128Const { .. }) => T::V128, O::F32Const { .. } => T::F32, O::F64Const { .. } => T::F64, O::RefFunc { .. } @@ -131,7 +131,7 @@ impl<'cfg, 'wasm> Reencode for InitTranslator<'cfg, 'wasm> { } else { self.config.rng().gen() }), - T::V128 => CE::v128_const(if let O::V128Const { value } = op { + T::V128 => CE::v128_const(if let O::Simd(SimdOperator::V128Const { value }) = op { self.config.rng().gen_range(0..value.i128() as u128) as i128 } else { self.config.rng().gen() diff --git a/crates/wasm-mutate/src/mutators/peephole/dfg.rs b/crates/wasm-mutate/src/mutators/peephole/dfg.rs index 786643141f..20df4279b2 100644 --- a/crates/wasm-mutate/src/mutators/peephole/dfg.rs +++ b/crates/wasm-mutate/src/mutators/peephole/dfg.rs @@ -11,6 +11,7 @@ use egg::{Id, Language, RecExpr}; use std::collections::HashMap; use std::ops::Range; use wasmparser::Operator; +use wasmparser::SimdOperator; /// It executes a minimal symbolic evaluation of the stack to detect operands /// location in the code for certain operators @@ -369,8 +370,10 @@ impl<'a> DFGBuilder { let (operator, _) = &operators[idx]; // Check if it is not EOF + use Operator as Op; + use SimdOperator as SimdOp; match operator { - Operator::Call { function_index } => { + Op::Call { function_index } => { let typeinfo = info.get_functype_idx(*function_index); match typeinfo { crate::module::TypeInfo::Func(tpe) => { @@ -400,234 +403,234 @@ impl<'a> DFGBuilder { } } } - Operator::LocalGet { local_index } => { + 
Op::LocalGet { local_index } => { self.push_node(Lang::LocalGet(*local_index), idx); } - Operator::GlobalGet { global_index } => { + Op::GlobalGet { global_index } => { self.push_node(Lang::GlobalGet(*global_index), idx); } - Operator::GlobalSet { global_index } => { + Op::GlobalSet { global_index } => { let child = self.pop_operand(idx, true); self.empty_node(Lang::GlobalSet(*global_index, Id::from(child)), idx); } - Operator::I32Const { value } => { + Op::I32Const { value } => { self.push_node(Lang::I32(*value), idx); } - Operator::I64Const { value } => { + Op::I64Const { value } => { self.push_node(Lang::I64(*value), idx); } - Operator::F32Const { value } => { + Op::F32Const { value } => { self.push_node(Lang::F32((*value).into()), idx); } - Operator::F64Const { value } => { + Op::F64Const { value } => { self.push_node(Lang::F64((*value).into()), idx); } - Operator::V128Const { value } => { + Op::Simd(SimdOp::V128Const { value }) => { self.push_node(Lang::V128(value.i128()), idx); } - Operator::LocalSet { local_index } => { + Op::LocalSet { local_index } => { let val = self.pop_operand(idx, true); self.empty_node(Lang::LocalSet(*local_index, Id::from(val)), idx); } - Operator::LocalTee { local_index } => { + Op::LocalTee { local_index } => { let val = self.pop_operand(idx, true); self.push_node(Lang::LocalTee(*local_index, Id::from(val)), idx); self.new_color(); } - Operator::Nop => { + Op::Nop => { self.empty_node(Lang::Nop, idx); } - Operator::I32Store { memarg } => self.store(idx, memarg, Lang::I32Store), - Operator::I64Store { memarg } => self.store(idx, memarg, Lang::I64Store), - Operator::F32Store { memarg } => self.store(idx, memarg, Lang::F32Store), - Operator::F64Store { memarg } => self.store(idx, memarg, Lang::F64Store), - Operator::I32Store8 { memarg } => self.store(idx, memarg, Lang::I32Store8), - Operator::I32Store16 { memarg } => self.store(idx, memarg, Lang::I32Store16), - Operator::I64Store8 { memarg } => self.store(idx, memarg, 
Lang::I64Store8), - Operator::I64Store16 { memarg } => self.store(idx, memarg, Lang::I64Store16), - Operator::I64Store32 { memarg } => self.store(idx, memarg, Lang::I64Store32), + Op::I32Store { memarg } => self.store(idx, memarg, Lang::I32Store), + Op::I64Store { memarg } => self.store(idx, memarg, Lang::I64Store), + Op::F32Store { memarg } => self.store(idx, memarg, Lang::F32Store), + Op::F64Store { memarg } => self.store(idx, memarg, Lang::F64Store), + Op::I32Store8 { memarg } => self.store(idx, memarg, Lang::I32Store8), + Op::I32Store16 { memarg } => self.store(idx, memarg, Lang::I32Store16), + Op::I64Store8 { memarg } => self.store(idx, memarg, Lang::I64Store8), + Op::I64Store16 { memarg } => self.store(idx, memarg, Lang::I64Store16), + Op::I64Store32 { memarg } => self.store(idx, memarg, Lang::I64Store32), // All memory loads - Operator::I32Load { memarg } => self.load(idx, memarg, Lang::I32Load), - Operator::I64Load { memarg } => self.load(idx, memarg, Lang::I64Load), - Operator::F32Load { memarg } => self.load(idx, memarg, Lang::F32Load), - Operator::F64Load { memarg } => self.load(idx, memarg, Lang::F64Load), - Operator::I32Load8S { memarg } => self.load(idx, memarg, Lang::I32Load8S), - Operator::I32Load8U { memarg } => self.load(idx, memarg, Lang::I32Load8U), - Operator::I32Load16S { memarg } => self.load(idx, memarg, Lang::I32Load16S), - Operator::I32Load16U { memarg } => self.load(idx, memarg, Lang::I32Load16U), - Operator::I64Load8S { memarg } => self.load(idx, memarg, Lang::I64Load8S), - Operator::I64Load8U { memarg } => self.load(idx, memarg, Lang::I64Load8U), - Operator::I64Load16S { memarg } => self.load(idx, memarg, Lang::I64Load16S), - Operator::I64Load16U { memarg } => self.load(idx, memarg, Lang::I64Load16U), - Operator::I64Load32S { memarg } => self.load(idx, memarg, Lang::I64Load32S), - Operator::I64Load32U { memarg } => self.load(idx, memarg, Lang::I64Load32U), - - Operator::I32Eqz => self.unop(idx, Lang::I32Eqz), - Operator::I64Eqz => 
self.unop(idx, Lang::I64Eqz), - - Operator::F32Eq => self.binop(idx, Lang::F32Eq), - Operator::F32Ne => self.binop(idx, Lang::F32Ne), - Operator::F32Lt => self.binop(idx, Lang::F32Lt), - Operator::F32Gt => self.binop(idx, Lang::F32Gt), - Operator::F32Le => self.binop(idx, Lang::F32Le), - Operator::F32Ge => self.binop(idx, Lang::F32Ge), - - Operator::F64Eq => self.binop(idx, Lang::F64Eq), - Operator::F64Ne => self.binop(idx, Lang::F64Ne), - Operator::F64Lt => self.binop(idx, Lang::F64Lt), - Operator::F64Gt => self.binop(idx, Lang::F64Gt), - Operator::F64Le => self.binop(idx, Lang::F64Le), - Operator::F64Ge => self.binop(idx, Lang::F64Ge), - - Operator::I32Clz => self.unop(idx, Lang::I32Clz), - Operator::I32Ctz => self.unop(idx, Lang::I32Ctz), - Operator::I64Clz => self.unop(idx, Lang::I64Clz), - Operator::I64Ctz => self.unop(idx, Lang::I64Ctz), - - Operator::F32Abs => self.unop(idx, Lang::F32Abs), - Operator::F32Neg => self.unop(idx, Lang::F32Neg), - Operator::F32Ceil => self.unop(idx, Lang::F32Ceil), - Operator::F32Floor => self.unop(idx, Lang::F32Floor), - Operator::F32Trunc => self.unop(idx, Lang::F32Trunc), - Operator::F32Nearest => self.unop(idx, Lang::F32Nearest), - Operator::F32Sqrt => self.unop(idx, Lang::F32Sqrt), - Operator::F32Add => self.binop(idx, Lang::F32Add), - Operator::F32Sub => self.binop(idx, Lang::F32Sub), - Operator::F32Mul => self.binop(idx, Lang::F32Mul), - Operator::F32Div => self.binop(idx, Lang::F32Div), - Operator::F32Min => self.binop(idx, Lang::F32Min), - Operator::F32Max => self.binop(idx, Lang::F32Max), - Operator::F32Copysign => self.binop(idx, Lang::F32Copysign), - - Operator::F64Abs => self.unop(idx, Lang::F64Abs), - Operator::F64Neg => self.unop(idx, Lang::F64Neg), - Operator::F64Ceil => self.unop(idx, Lang::F64Ceil), - Operator::F64Floor => self.unop(idx, Lang::F64Floor), - Operator::F64Trunc => self.unop(idx, Lang::F64Trunc), - Operator::F64Nearest => self.unop(idx, Lang::F64Nearest), - Operator::F64Sqrt => self.unop(idx, 
Lang::F64Sqrt), - Operator::F64Add => self.binop(idx, Lang::F64Add), - Operator::F64Sub => self.binop(idx, Lang::F64Sub), - Operator::F64Mul => self.binop(idx, Lang::F64Mul), - Operator::F64Div => self.binop(idx, Lang::F64Div), - Operator::F64Min => self.binop(idx, Lang::F64Min), - Operator::F64Max => self.binop(idx, Lang::F64Max), - Operator::F64Copysign => self.binop(idx, Lang::F64Copysign), - - Operator::I32TruncF32S => self.unop(idx, Lang::I32TruncF32S), - Operator::I32TruncF32U => self.unop(idx, Lang::I32TruncF32U), - Operator::I32TruncF64S => self.unop(idx, Lang::I32TruncF64S), - Operator::I32TruncF64U => self.unop(idx, Lang::I32TruncF64U), - Operator::I64TruncF32S => self.unop(idx, Lang::I64TruncF32S), - Operator::I64TruncF32U => self.unop(idx, Lang::I64TruncF32U), - Operator::I64TruncF64S => self.unop(idx, Lang::I64TruncF64S), - Operator::I64TruncF64U => self.unop(idx, Lang::I64TruncF64U), - Operator::F32ConvertI32S => self.unop(idx, Lang::F32ConvertI32S), - Operator::F32ConvertI32U => self.unop(idx, Lang::F32ConvertI32U), - Operator::F32ConvertI64S => self.unop(idx, Lang::F32ConvertI64S), - Operator::F32ConvertI64U => self.unop(idx, Lang::F32ConvertI64U), - Operator::F64ConvertI32S => self.unop(idx, Lang::F64ConvertI32S), - Operator::F64ConvertI32U => self.unop(idx, Lang::F64ConvertI32U), - Operator::F64ConvertI64S => self.unop(idx, Lang::F64ConvertI64S), - Operator::F64ConvertI64U => self.unop(idx, Lang::F64ConvertI64U), - Operator::F64PromoteF32 => self.unop(idx, Lang::F64PromoteF32), - Operator::F32DemoteF64 => self.unop(idx, Lang::F32DemoteF64), - Operator::I32ReinterpretF32 => self.unop(idx, Lang::I32ReinterpretF32), - Operator::I64ReinterpretF64 => self.unop(idx, Lang::I64ReinterpretF64), - Operator::F32ReinterpretI32 => self.unop(idx, Lang::F32ReinterpretI32), - Operator::F64ReinterpretI64 => self.unop(idx, Lang::F64ReinterpretI64), - Operator::I32TruncSatF32S => self.unop(idx, Lang::I32TruncSatF32S), - Operator::I32TruncSatF32U => self.unop(idx, 
Lang::I32TruncSatF32U), - Operator::I32TruncSatF64S => self.unop(idx, Lang::I32TruncSatF64S), - Operator::I32TruncSatF64U => self.unop(idx, Lang::I32TruncSatF64U), - Operator::I64TruncSatF32S => self.unop(idx, Lang::I64TruncSatF32S), - Operator::I64TruncSatF32U => self.unop(idx, Lang::I64TruncSatF32U), - Operator::I64TruncSatF64S => self.unop(idx, Lang::I64TruncSatF64S), - Operator::I64TruncSatF64U => self.unop(idx, Lang::I64TruncSatF64U), - - Operator::I32Add => self.binop(idx, Lang::I32Add), - Operator::I32Sub => self.binop(idx, Lang::I32Sub), - Operator::I32Eq => self.binop(idx, Lang::I32Eq), - Operator::I32Ne => self.binop(idx, Lang::I32Ne), - Operator::I32LtS => self.binop(idx, Lang::I32LtS), - Operator::I32LtU => self.binop(idx, Lang::I32LtU), - Operator::I32GtS => self.binop(idx, Lang::I32GtS), - Operator::I32GtU => self.binop(idx, Lang::I32GtU), - Operator::I32LeS => self.binop(idx, Lang::I32LeS), - Operator::I32LeU => self.binop(idx, Lang::I32LeU), - Operator::I32GeS => self.binop(idx, Lang::I32GeS), - Operator::I32GeU => self.binop(idx, Lang::I32GeU), - Operator::I32Mul => self.binop(idx, Lang::I32Mul), - Operator::I32DivS => self.binop(idx, Lang::I32DivS), - Operator::I32DivU => self.binop(idx, Lang::I32DivU), - Operator::I32RemS => self.binop(idx, Lang::I32RemS), - Operator::I32RemU => self.binop(idx, Lang::I32RemU), - Operator::I32Shl => self.binop(idx, Lang::I32Shl), - Operator::I32ShrS => self.binop(idx, Lang::I32ShrS), - Operator::I32ShrU => self.binop(idx, Lang::I32ShrU), - Operator::I32Xor => self.binop(idx, Lang::I32Xor), - Operator::I32Or => self.binop(idx, Lang::I32Or), - Operator::I32And => self.binop(idx, Lang::I32And), - Operator::I32Rotl => self.binop(idx, Lang::I32RotL), - Operator::I32Rotr => self.binop(idx, Lang::I32RotR), - - Operator::I64Add => self.binop(idx, Lang::I64Add), - Operator::I64Sub => self.binop(idx, Lang::I64Sub), - Operator::I64Eq => self.binop(idx, Lang::I64Eq), - Operator::I64Ne => self.binop(idx, Lang::I64Ne), - 
Operator::I64LtS => self.binop(idx, Lang::I64LtS), - Operator::I64LtU => self.binop(idx, Lang::I64LtU), - Operator::I64GtS => self.binop(idx, Lang::I64GtS), - Operator::I64GtU => self.binop(idx, Lang::I64GtU), - Operator::I64LeS => self.binop(idx, Lang::I64LeS), - Operator::I64LeU => self.binop(idx, Lang::I64LeU), - Operator::I64GeS => self.binop(idx, Lang::I64GeS), - Operator::I64GeU => self.binop(idx, Lang::I64GeU), - Operator::I64Mul => self.binop(idx, Lang::I64Mul), - Operator::I64DivS => self.binop(idx, Lang::I64DivS), - Operator::I64DivU => self.binop(idx, Lang::I64DivU), - Operator::I64RemS => self.binop(idx, Lang::I64RemS), - Operator::I64RemU => self.binop(idx, Lang::I64RemU), - Operator::I64Shl => self.binop(idx, Lang::I64Shl), - Operator::I64ShrS => self.binop(idx, Lang::I64ShrS), - Operator::I64ShrU => self.binop(idx, Lang::I64ShrU), - Operator::I64Xor => self.binop(idx, Lang::I64Xor), - Operator::I64Or => self.binop(idx, Lang::I64Or), - Operator::I64And => self.binop(idx, Lang::I64And), - Operator::I64Rotl => self.binop(idx, Lang::I64RotL), - Operator::I64Rotr => self.binop(idx, Lang::I64RotR), - - Operator::V128Not => self.unop(idx, Lang::V128Not), - Operator::V128And => self.binop(idx, Lang::V128And), - Operator::V128AndNot => self.binop(idx, Lang::V128AndNot), - Operator::V128Or => self.binop(idx, Lang::V128Or), - Operator::V128Xor => self.binop(idx, Lang::V128Xor), - Operator::V128AnyTrue => self.unop(idx, Lang::V128AnyTrue), - Operator::V128Bitselect => self.ternop(idx, Lang::V128Bitselect), - - Operator::Drop => { + Op::I32Load { memarg } => self.load(idx, memarg, Lang::I32Load), + Op::I64Load { memarg } => self.load(idx, memarg, Lang::I64Load), + Op::F32Load { memarg } => self.load(idx, memarg, Lang::F32Load), + Op::F64Load { memarg } => self.load(idx, memarg, Lang::F64Load), + Op::I32Load8S { memarg } => self.load(idx, memarg, Lang::I32Load8S), + Op::I32Load8U { memarg } => self.load(idx, memarg, Lang::I32Load8U), + Op::I32Load16S { memarg } => 
self.load(idx, memarg, Lang::I32Load16S), + Op::I32Load16U { memarg } => self.load(idx, memarg, Lang::I32Load16U), + Op::I64Load8S { memarg } => self.load(idx, memarg, Lang::I64Load8S), + Op::I64Load8U { memarg } => self.load(idx, memarg, Lang::I64Load8U), + Op::I64Load16S { memarg } => self.load(idx, memarg, Lang::I64Load16S), + Op::I64Load16U { memarg } => self.load(idx, memarg, Lang::I64Load16U), + Op::I64Load32S { memarg } => self.load(idx, memarg, Lang::I64Load32S), + Op::I64Load32U { memarg } => self.load(idx, memarg, Lang::I64Load32U), + + Op::I32Eqz => self.unop(idx, Lang::I32Eqz), + Op::I64Eqz => self.unop(idx, Lang::I64Eqz), + + Op::F32Eq => self.binop(idx, Lang::F32Eq), + Op::F32Ne => self.binop(idx, Lang::F32Ne), + Op::F32Lt => self.binop(idx, Lang::F32Lt), + Op::F32Gt => self.binop(idx, Lang::F32Gt), + Op::F32Le => self.binop(idx, Lang::F32Le), + Op::F32Ge => self.binop(idx, Lang::F32Ge), + + Op::F64Eq => self.binop(idx, Lang::F64Eq), + Op::F64Ne => self.binop(idx, Lang::F64Ne), + Op::F64Lt => self.binop(idx, Lang::F64Lt), + Op::F64Gt => self.binop(idx, Lang::F64Gt), + Op::F64Le => self.binop(idx, Lang::F64Le), + Op::F64Ge => self.binop(idx, Lang::F64Ge), + + Op::I32Clz => self.unop(idx, Lang::I32Clz), + Op::I32Ctz => self.unop(idx, Lang::I32Ctz), + Op::I64Clz => self.unop(idx, Lang::I64Clz), + Op::I64Ctz => self.unop(idx, Lang::I64Ctz), + + Op::F32Abs => self.unop(idx, Lang::F32Abs), + Op::F32Neg => self.unop(idx, Lang::F32Neg), + Op::F32Ceil => self.unop(idx, Lang::F32Ceil), + Op::F32Floor => self.unop(idx, Lang::F32Floor), + Op::F32Trunc => self.unop(idx, Lang::F32Trunc), + Op::F32Nearest => self.unop(idx, Lang::F32Nearest), + Op::F32Sqrt => self.unop(idx, Lang::F32Sqrt), + Op::F32Add => self.binop(idx, Lang::F32Add), + Op::F32Sub => self.binop(idx, Lang::F32Sub), + Op::F32Mul => self.binop(idx, Lang::F32Mul), + Op::F32Div => self.binop(idx, Lang::F32Div), + Op::F32Min => self.binop(idx, Lang::F32Min), + Op::F32Max => self.binop(idx, Lang::F32Max), 
+ Op::F32Copysign => self.binop(idx, Lang::F32Copysign), + + Op::F64Abs => self.unop(idx, Lang::F64Abs), + Op::F64Neg => self.unop(idx, Lang::F64Neg), + Op::F64Ceil => self.unop(idx, Lang::F64Ceil), + Op::F64Floor => self.unop(idx, Lang::F64Floor), + Op::F64Trunc => self.unop(idx, Lang::F64Trunc), + Op::F64Nearest => self.unop(idx, Lang::F64Nearest), + Op::F64Sqrt => self.unop(idx, Lang::F64Sqrt), + Op::F64Add => self.binop(idx, Lang::F64Add), + Op::F64Sub => self.binop(idx, Lang::F64Sub), + Op::F64Mul => self.binop(idx, Lang::F64Mul), + Op::F64Div => self.binop(idx, Lang::F64Div), + Op::F64Min => self.binop(idx, Lang::F64Min), + Op::F64Max => self.binop(idx, Lang::F64Max), + Op::F64Copysign => self.binop(idx, Lang::F64Copysign), + + Op::I32TruncF32S => self.unop(idx, Lang::I32TruncF32S), + Op::I32TruncF32U => self.unop(idx, Lang::I32TruncF32U), + Op::I32TruncF64S => self.unop(idx, Lang::I32TruncF64S), + Op::I32TruncF64U => self.unop(idx, Lang::I32TruncF64U), + Op::I64TruncF32S => self.unop(idx, Lang::I64TruncF32S), + Op::I64TruncF32U => self.unop(idx, Lang::I64TruncF32U), + Op::I64TruncF64S => self.unop(idx, Lang::I64TruncF64S), + Op::I64TruncF64U => self.unop(idx, Lang::I64TruncF64U), + Op::F32ConvertI32S => self.unop(idx, Lang::F32ConvertI32S), + Op::F32ConvertI32U => self.unop(idx, Lang::F32ConvertI32U), + Op::F32ConvertI64S => self.unop(idx, Lang::F32ConvertI64S), + Op::F32ConvertI64U => self.unop(idx, Lang::F32ConvertI64U), + Op::F64ConvertI32S => self.unop(idx, Lang::F64ConvertI32S), + Op::F64ConvertI32U => self.unop(idx, Lang::F64ConvertI32U), + Op::F64ConvertI64S => self.unop(idx, Lang::F64ConvertI64S), + Op::F64ConvertI64U => self.unop(idx, Lang::F64ConvertI64U), + Op::F64PromoteF32 => self.unop(idx, Lang::F64PromoteF32), + Op::F32DemoteF64 => self.unop(idx, Lang::F32DemoteF64), + Op::I32ReinterpretF32 => self.unop(idx, Lang::I32ReinterpretF32), + Op::I64ReinterpretF64 => self.unop(idx, Lang::I64ReinterpretF64), + Op::F32ReinterpretI32 => self.unop(idx, 
Lang::F32ReinterpretI32), + Op::F64ReinterpretI64 => self.unop(idx, Lang::F64ReinterpretI64), + Op::I32TruncSatF32S => self.unop(idx, Lang::I32TruncSatF32S), + Op::I32TruncSatF32U => self.unop(idx, Lang::I32TruncSatF32U), + Op::I32TruncSatF64S => self.unop(idx, Lang::I32TruncSatF64S), + Op::I32TruncSatF64U => self.unop(idx, Lang::I32TruncSatF64U), + Op::I64TruncSatF32S => self.unop(idx, Lang::I64TruncSatF32S), + Op::I64TruncSatF32U => self.unop(idx, Lang::I64TruncSatF32U), + Op::I64TruncSatF64S => self.unop(idx, Lang::I64TruncSatF64S), + Op::I64TruncSatF64U => self.unop(idx, Lang::I64TruncSatF64U), + + Op::I32Add => self.binop(idx, Lang::I32Add), + Op::I32Sub => self.binop(idx, Lang::I32Sub), + Op::I32Eq => self.binop(idx, Lang::I32Eq), + Op::I32Ne => self.binop(idx, Lang::I32Ne), + Op::I32LtS => self.binop(idx, Lang::I32LtS), + Op::I32LtU => self.binop(idx, Lang::I32LtU), + Op::I32GtS => self.binop(idx, Lang::I32GtS), + Op::I32GtU => self.binop(idx, Lang::I32GtU), + Op::I32LeS => self.binop(idx, Lang::I32LeS), + Op::I32LeU => self.binop(idx, Lang::I32LeU), + Op::I32GeS => self.binop(idx, Lang::I32GeS), + Op::I32GeU => self.binop(idx, Lang::I32GeU), + Op::I32Mul => self.binop(idx, Lang::I32Mul), + Op::I32DivS => self.binop(idx, Lang::I32DivS), + Op::I32DivU => self.binop(idx, Lang::I32DivU), + Op::I32RemS => self.binop(idx, Lang::I32RemS), + Op::I32RemU => self.binop(idx, Lang::I32RemU), + Op::I32Shl => self.binop(idx, Lang::I32Shl), + Op::I32ShrS => self.binop(idx, Lang::I32ShrS), + Op::I32ShrU => self.binop(idx, Lang::I32ShrU), + Op::I32Xor => self.binop(idx, Lang::I32Xor), + Op::I32Or => self.binop(idx, Lang::I32Or), + Op::I32And => self.binop(idx, Lang::I32And), + Op::I32Rotl => self.binop(idx, Lang::I32RotL), + Op::I32Rotr => self.binop(idx, Lang::I32RotR), + + Op::I64Add => self.binop(idx, Lang::I64Add), + Op::I64Sub => self.binop(idx, Lang::I64Sub), + Op::I64Eq => self.binop(idx, Lang::I64Eq), + Op::I64Ne => self.binop(idx, Lang::I64Ne), + Op::I64LtS => 
self.binop(idx, Lang::I64LtS), + Op::I64LtU => self.binop(idx, Lang::I64LtU), + Op::I64GtS => self.binop(idx, Lang::I64GtS), + Op::I64GtU => self.binop(idx, Lang::I64GtU), + Op::I64LeS => self.binop(idx, Lang::I64LeS), + Op::I64LeU => self.binop(idx, Lang::I64LeU), + Op::I64GeS => self.binop(idx, Lang::I64GeS), + Op::I64GeU => self.binop(idx, Lang::I64GeU), + Op::I64Mul => self.binop(idx, Lang::I64Mul), + Op::I64DivS => self.binop(idx, Lang::I64DivS), + Op::I64DivU => self.binop(idx, Lang::I64DivU), + Op::I64RemS => self.binop(idx, Lang::I64RemS), + Op::I64RemU => self.binop(idx, Lang::I64RemU), + Op::I64Shl => self.binop(idx, Lang::I64Shl), + Op::I64ShrS => self.binop(idx, Lang::I64ShrS), + Op::I64ShrU => self.binop(idx, Lang::I64ShrU), + Op::I64Xor => self.binop(idx, Lang::I64Xor), + Op::I64Or => self.binop(idx, Lang::I64Or), + Op::I64And => self.binop(idx, Lang::I64And), + Op::I64Rotl => self.binop(idx, Lang::I64RotL), + Op::I64Rotr => self.binop(idx, Lang::I64RotR), + + Op::Simd(SimdOp::V128Not) => self.unop(idx, Lang::V128Not), + Op::Simd(SimdOp::V128And) => self.binop(idx, Lang::V128And), + Op::Simd(SimdOp::V128AndNot) => self.binop(idx, Lang::V128AndNot), + Op::Simd(SimdOp::V128Or) => self.binop(idx, Lang::V128Or), + Op::Simd(SimdOp::V128Xor) => self.binop(idx, Lang::V128Xor), + Op::Simd(SimdOp::V128AnyTrue) => self.unop(idx, Lang::V128AnyTrue), + Op::Simd(SimdOp::V128Bitselect) => self.ternop(idx, Lang::V128Bitselect), + + Op::Drop => { let arg = self.pop_operand(idx, false); self.empty_node(Lang::Drop([Id::from(arg)]), idx); } // conversion between integers - Operator::I32WrapI64 => self.unop(idx, Lang::Wrap), - Operator::I32Extend8S => self.unop(idx, Lang::I32Extend8S), - Operator::I32Extend16S => self.unop(idx, Lang::I32Extend16S), + Op::I32WrapI64 => self.unop(idx, Lang::Wrap), + Op::I32Extend8S => self.unop(idx, Lang::I32Extend8S), + Op::I32Extend16S => self.unop(idx, Lang::I32Extend16S), - Operator::I64Extend8S => self.unop(idx, Lang::I64Extend8S), - 
Operator::I64Extend16S => self.unop(idx, Lang::I64Extend16S), - Operator::I64Extend32S => self.unop(idx, Lang::I64Extend32S), - Operator::I64ExtendI32S => self.unop(idx, Lang::I64ExtendI32S), - Operator::I64ExtendI32U => self.unop(idx, Lang::I64ExtendI32U), + Op::I64Extend8S => self.unop(idx, Lang::I64Extend8S), + Op::I64Extend16S => self.unop(idx, Lang::I64Extend16S), + Op::I64Extend32S => self.unop(idx, Lang::I64Extend32S), + Op::I64ExtendI32S => self.unop(idx, Lang::I64ExtendI32S), + Op::I64ExtendI32U => self.unop(idx, Lang::I64ExtendI32U), - Operator::I32Popcnt => self.unop(idx, Lang::I32Popcnt), - Operator::I64Popcnt => self.unop(idx, Lang::I64Popcnt), + Op::I32Popcnt => self.unop(idx, Lang::I32Popcnt), + Op::I64Popcnt => self.unop(idx, Lang::I64Popcnt), - Operator::Select => { + Op::Select => { let condition = self.pop_operand(idx, false); let alternative = self.pop_operand(idx, false); let consequent = self.pop_operand(idx, false); @@ -640,14 +643,14 @@ impl<'a> DFGBuilder { idx, ); } - Operator::MemoryGrow { mem } => { + Op::MemoryGrow { mem } => { let arg = self.pop_operand(idx, false); self.push_node(Lang::MemoryGrow(*mem, Id::from(arg)), idx); } - Operator::MemorySize { mem } => { + Op::MemorySize { mem } => { self.push_node(Lang::MemorySize(*mem), idx); } - Operator::TableGrow { table } => { + Op::TableGrow { table } => { let elem = self.pop_operand(idx, false); let size = self.pop_operand(idx, false); self.push_node( @@ -655,19 +658,19 @@ impl<'a> DFGBuilder { idx, ); } - Operator::TableSize { table } => { + Op::TableSize { table } => { self.push_node(Lang::TableSize(*table), idx); } - Operator::DataDrop { data_index } => { + Op::DataDrop { data_index } => { self.empty_node(Lang::DataDrop(*data_index), idx); } - Operator::ElemDrop { elem_index } => { + Op::ElemDrop { elem_index } => { self.empty_node(Lang::ElemDrop(*elem_index), idx); } - Operator::MemoryInit { mem, data_index } => { + Op::MemoryInit { mem, data_index } => { let a = 
Id::from(self.pop_operand(idx, false)); let b = Id::from(self.pop_operand(idx, false)); let c = Id::from(self.pop_operand(idx, false)); @@ -682,7 +685,7 @@ impl<'a> DFGBuilder { idx, ); } - Operator::MemoryCopy { src_mem, dst_mem } => { + Op::MemoryCopy { src_mem, dst_mem } => { let a = Id::from(self.pop_operand(idx, false)); let b = Id::from(self.pop_operand(idx, false)); let c = Id::from(self.pop_operand(idx, false)); @@ -698,14 +701,14 @@ impl<'a> DFGBuilder { ); } - Operator::MemoryFill { mem } => { + Op::MemoryFill { mem } => { let a = Id::from(self.pop_operand(idx, false)); let b = Id::from(self.pop_operand(idx, false)); let c = Id::from(self.pop_operand(idx, false)); self.empty_node(Lang::MemoryFill(*mem, [c, b, a]), idx); } - Operator::TableInit { table, elem_index } => { + Op::TableInit { table, elem_index } => { let a = Id::from(self.pop_operand(idx, false)); let b = Id::from(self.pop_operand(idx, false)); let c = Id::from(self.pop_operand(idx, false)); @@ -720,7 +723,7 @@ impl<'a> DFGBuilder { idx, ); } - Operator::TableCopy { + Op::TableCopy { src_table, dst_table, } => { @@ -739,342 +742,342 @@ impl<'a> DFGBuilder { ); } - Operator::TableFill { table } => { + Op::TableFill { table } => { let a = Id::from(self.pop_operand(idx, false)); let b = Id::from(self.pop_operand(idx, false)); let c = Id::from(self.pop_operand(idx, false)); self.empty_node(Lang::TableFill(*table, [c, b, a]), idx); } - Operator::TableGet { table } => { + Op::TableGet { table } => { let arg = Id::from(self.pop_operand(idx, false)); self.push_node(Lang::TableGet(*table, arg), idx); } - Operator::TableSet { table } => { + Op::TableSet { table } => { let arg1 = Id::from(self.pop_operand(idx, false)); let arg2 = Id::from(self.pop_operand(idx, false)); self.empty_node(Lang::TableSet(*table, [arg2, arg1]), idx); } - Operator::RefNull { + Op::RefNull { hty: wasmparser::HeapType::EXTERN, } => { self.push_node(Lang::RefNull(RefType::Extern), idx); } - Operator::RefNull { + Op::RefNull { hty: 
wasmparser::HeapType::FUNC, } => { self.push_node(Lang::RefNull(RefType::Func), idx); } - Operator::RefFunc { function_index } => { + Op::RefFunc { function_index } => { self.push_node(Lang::RefFunc(*function_index), idx); } - Operator::RefIsNull => { + Op::RefIsNull => { let arg = Id::from(self.pop_operand(idx, false)); self.push_node(Lang::RefIsNull(arg), idx); } - Operator::V128Load { memarg } => self.load(idx, memarg, Lang::V128Load), - Operator::V128Load8x8S { memarg } => self.load(idx, memarg, Lang::V128Load8x8S), - Operator::V128Load8x8U { memarg } => self.load(idx, memarg, Lang::V128Load8x8U), - Operator::V128Load16x4S { memarg } => self.load(idx, memarg, Lang::V128Load16x4S), - Operator::V128Load16x4U { memarg } => self.load(idx, memarg, Lang::V128Load16x4U), - Operator::V128Load32x2S { memarg } => self.load(idx, memarg, Lang::V128Load32x2S), - Operator::V128Load32x2U { memarg } => self.load(idx, memarg, Lang::V128Load32x2U), - Operator::V128Load8Splat { memarg } => self.load(idx, memarg, Lang::V128Load8Splat), - Operator::V128Load16Splat { memarg } => { + Op::Simd(SimdOp::V128Load { memarg }) => self.load(idx, memarg, Lang::V128Load), + Op::Simd(SimdOp::V128Load8x8S { memarg }) => self.load(idx, memarg, Lang::V128Load8x8S), + Op::Simd(SimdOp::V128Load8x8U { memarg }) => self.load(idx, memarg, Lang::V128Load8x8U), + Op::Simd(SimdOp::V128Load16x4S { memarg }) => self.load(idx, memarg, Lang::V128Load16x4S), + Op::Simd(SimdOp::V128Load16x4U { memarg }) => self.load(idx, memarg, Lang::V128Load16x4U), + Op::Simd(SimdOp::V128Load32x2S { memarg }) => self.load(idx, memarg, Lang::V128Load32x2S), + Op::Simd(SimdOp::V128Load32x2U { memarg }) => self.load(idx, memarg, Lang::V128Load32x2U), + Op::Simd(SimdOp::V128Load8Splat { memarg }) => self.load(idx, memarg, Lang::V128Load8Splat), + Op::Simd(SimdOp::V128Load16Splat { memarg }) => { self.load(idx, memarg, Lang::V128Load16Splat) } - Operator::V128Load32Splat { memarg } => { + Op::Simd(SimdOp::V128Load32Splat { memarg 
}) => { self.load(idx, memarg, Lang::V128Load32Splat) } - Operator::V128Load64Splat { memarg } => { + Op::Simd(SimdOp::V128Load64Splat { memarg }) => { self.load(idx, memarg, Lang::V128Load64Splat) } - Operator::V128Load32Zero { memarg } => self.load(idx, memarg, Lang::V128Load32Zero), - Operator::V128Load64Zero { memarg } => self.load(idx, memarg, Lang::V128Load64Zero), - Operator::V128Store { memarg } => self.store(idx, memarg, Lang::V128Store), - Operator::V128Load8Lane { memarg, lane } => { + Op::Simd(SimdOp::V128Load32Zero { memarg }) => self.load(idx, memarg, Lang::V128Load32Zero), + Op::Simd(SimdOp::V128Load64Zero { memarg }) => self.load(idx, memarg, Lang::V128Load64Zero), + Op::Simd(SimdOp::V128Store { memarg }) => self.store(idx, memarg, Lang::V128Store), + Op::Simd(SimdOp::V128Load8Lane { memarg, lane }) => { self.load_lane(idx, memarg, lane, Lang::V128Load8Lane) } - Operator::V128Load16Lane { memarg, lane } => { + Op::Simd(SimdOp::V128Load16Lane { memarg, lane }) => { self.load_lane(idx, memarg, lane, Lang::V128Load16Lane) } - Operator::V128Load32Lane { memarg, lane } => { + Op::Simd(SimdOp::V128Load32Lane { memarg, lane }) => { self.load_lane(idx, memarg, lane, Lang::V128Load32Lane) } - Operator::V128Load64Lane { memarg, lane } => { + Op::Simd(SimdOp::V128Load64Lane { memarg, lane }) => { self.load_lane(idx, memarg, lane, Lang::V128Load64Lane) } - Operator::V128Store8Lane { memarg, lane } => { + Op::Simd(SimdOp::V128Store8Lane { memarg, lane }) => { self.store_lane(idx, memarg, lane, Lang::V128Store8Lane) } - Operator::V128Store16Lane { memarg, lane } => { + Op::Simd(SimdOp::V128Store16Lane { memarg, lane }) => { self.store_lane(idx, memarg, lane, Lang::V128Store16Lane) } - Operator::V128Store32Lane { memarg, lane } => { + Op::Simd(SimdOp::V128Store32Lane { memarg, lane }) => { self.store_lane(idx, memarg, lane, Lang::V128Store32Lane) } - Operator::V128Store64Lane { memarg, lane } => { + Op::Simd(SimdOp::V128Store64Lane { memarg, lane }) => { 
self.store_lane(idx, memarg, lane, Lang::V128Store64Lane) } - Operator::I8x16ExtractLaneS { lane } => { + Op::Simd(SimdOp::I8x16ExtractLaneS { lane }) => { self.extract_lane(idx, lane, Lang::I8x16ExtractLaneS) } - Operator::I8x16ExtractLaneU { lane } => { + Op::Simd(SimdOp::I8x16ExtractLaneU { lane }) => { self.extract_lane(idx, lane, Lang::I8x16ExtractLaneU) } - Operator::I8x16ReplaceLane { lane } => { + Op::Simd(SimdOp::I8x16ReplaceLane { lane }) => { self.replace_lane(idx, lane, Lang::I8x16ReplaceLane) } - Operator::I16x8ExtractLaneS { lane } => { + Op::Simd(SimdOp::I16x8ExtractLaneS { lane }) => { self.extract_lane(idx, lane, Lang::I16x8ExtractLaneS) } - Operator::I16x8ExtractLaneU { lane } => { + Op::Simd(SimdOp::I16x8ExtractLaneU { lane }) => { self.extract_lane(idx, lane, Lang::I16x8ExtractLaneU) } - Operator::I16x8ReplaceLane { lane } => { + Op::Simd(SimdOp::I16x8ReplaceLane { lane }) => { self.replace_lane(idx, lane, Lang::I16x8ReplaceLane) } - Operator::I32x4ExtractLane { lane } => { + Op::Simd(SimdOp::I32x4ExtractLane { lane }) => { self.extract_lane(idx, lane, Lang::I32x4ExtractLane) } - Operator::I32x4ReplaceLane { lane } => { + Op::Simd(SimdOp::I32x4ReplaceLane { lane }) => { self.replace_lane(idx, lane, Lang::I32x4ReplaceLane) } - Operator::I64x2ExtractLane { lane } => { + Op::Simd(SimdOp::I64x2ExtractLane { lane }) => { self.extract_lane(idx, lane, Lang::I64x2ExtractLane) } - Operator::I64x2ReplaceLane { lane } => { + Op::Simd(SimdOp::I64x2ReplaceLane { lane }) => { self.replace_lane(idx, lane, Lang::I64x2ReplaceLane) } - Operator::F32x4ExtractLane { lane } => { + Op::Simd(SimdOp::F32x4ExtractLane { lane }) => { self.extract_lane(idx, lane, Lang::F32x4ExtractLane) } - Operator::F32x4ReplaceLane { lane } => { + Op::Simd(SimdOp::F32x4ReplaceLane { lane }) => { self.replace_lane(idx, lane, Lang::F32x4ReplaceLane) } - Operator::F64x2ExtractLane { lane } => { + Op::Simd(SimdOp::F64x2ExtractLane { lane }) => { self.extract_lane(idx, lane, 
Lang::F64x2ExtractLane) } - Operator::F64x2ReplaceLane { lane } => { + Op::Simd(SimdOp::F64x2ReplaceLane { lane }) => { self.replace_lane(idx, lane, Lang::F64x2ReplaceLane) } - Operator::I8x16Swizzle => self.binop(idx, Lang::I8x16Swizzle), - Operator::I8x16Shuffle { lanes } => { + Op::Simd(SimdOp::I8x16Swizzle) => self.binop(idx, Lang::I8x16Swizzle), + Op::Simd(SimdOp::I8x16Shuffle { lanes }) => { let a = Id::from(self.pop_operand(idx, false)); let b = Id::from(self.pop_operand(idx, false)); self.push_node(Lang::I8x16Shuffle(Shuffle { indices: *lanes }, [b, a]), idx); } - Operator::I8x16Splat => self.unop(idx, Lang::I8x16Splat), - Operator::I16x8Splat => self.unop(idx, Lang::I16x8Splat), - Operator::I32x4Splat => self.unop(idx, Lang::I32x4Splat), - Operator::I64x2Splat => self.unop(idx, Lang::I64x2Splat), - Operator::F32x4Splat => self.unop(idx, Lang::F32x4Splat), - Operator::F64x2Splat => self.unop(idx, Lang::F64x2Splat), - - Operator::I8x16Eq => self.binop(idx, Lang::I8x16Eq), - Operator::I8x16Ne => self.binop(idx, Lang::I8x16Ne), - Operator::I8x16LtS => self.binop(idx, Lang::I8x16LtS), - Operator::I8x16LtU => self.binop(idx, Lang::I8x16LtU), - Operator::I8x16GtS => self.binop(idx, Lang::I8x16GtS), - Operator::I8x16GtU => self.binop(idx, Lang::I8x16GtU), - Operator::I8x16LeS => self.binop(idx, Lang::I8x16LeS), - Operator::I8x16LeU => self.binop(idx, Lang::I8x16LeU), - Operator::I8x16GeS => self.binop(idx, Lang::I8x16GeS), - Operator::I8x16GeU => self.binop(idx, Lang::I8x16GeU), - Operator::I16x8Eq => self.binop(idx, Lang::I16x8Eq), - Operator::I16x8Ne => self.binop(idx, Lang::I16x8Ne), - Operator::I16x8LtS => self.binop(idx, Lang::I16x8LtS), - Operator::I16x8LtU => self.binop(idx, Lang::I16x8LtU), - Operator::I16x8GtS => self.binop(idx, Lang::I16x8GtS), - Operator::I16x8GtU => self.binop(idx, Lang::I16x8GtU), - Operator::I16x8LeS => self.binop(idx, Lang::I16x8LeS), - Operator::I16x8LeU => self.binop(idx, Lang::I16x8LeU), - Operator::I16x8GeS => self.binop(idx, 
Lang::I16x8GeS), - Operator::I16x8GeU => self.binop(idx, Lang::I16x8GeU), - Operator::I32x4Eq => self.binop(idx, Lang::I32x4Eq), - Operator::I32x4Ne => self.binop(idx, Lang::I32x4Ne), - Operator::I32x4LtS => self.binop(idx, Lang::I32x4LtS), - Operator::I32x4LtU => self.binop(idx, Lang::I32x4LtU), - Operator::I32x4GtS => self.binop(idx, Lang::I32x4GtS), - Operator::I32x4GtU => self.binop(idx, Lang::I32x4GtU), - Operator::I32x4LeS => self.binop(idx, Lang::I32x4LeS), - Operator::I32x4LeU => self.binop(idx, Lang::I32x4LeU), - Operator::I32x4GeS => self.binop(idx, Lang::I32x4GeS), - Operator::I32x4GeU => self.binop(idx, Lang::I32x4GeU), - Operator::I64x2Eq => self.binop(idx, Lang::I64x2Eq), - Operator::I64x2Ne => self.binop(idx, Lang::I64x2Ne), - Operator::I64x2LtS => self.binop(idx, Lang::I64x2LtS), - Operator::I64x2GtS => self.binop(idx, Lang::I64x2GtS), - Operator::I64x2LeS => self.binop(idx, Lang::I64x2LeS), - Operator::I64x2GeS => self.binop(idx, Lang::I64x2GeS), - Operator::F32x4Eq => self.binop(idx, Lang::F32x4Eq), - Operator::F32x4Ne => self.binop(idx, Lang::F32x4Ne), - Operator::F32x4Lt => self.binop(idx, Lang::F32x4Lt), - Operator::F32x4Gt => self.binop(idx, Lang::F32x4Gt), - Operator::F32x4Le => self.binop(idx, Lang::F32x4Le), - Operator::F32x4Ge => self.binop(idx, Lang::F32x4Ge), - Operator::F64x2Eq => self.binop(idx, Lang::F64x2Eq), - Operator::F64x2Ne => self.binop(idx, Lang::F64x2Ne), - Operator::F64x2Lt => self.binop(idx, Lang::F64x2Lt), - Operator::F64x2Gt => self.binop(idx, Lang::F64x2Gt), - Operator::F64x2Le => self.binop(idx, Lang::F64x2Le), - Operator::F64x2Ge => self.binop(idx, Lang::F64x2Ge), - - Operator::I8x16Abs => self.unop(idx, Lang::I8x16Abs), - Operator::I8x16Neg => self.unop(idx, Lang::I8x16Neg), - Operator::I8x16Popcnt => self.unop(idx, Lang::I8x16Popcnt), - Operator::I8x16AllTrue => self.unop(idx, Lang::I8x16AllTrue), - Operator::I8x16Bitmask => self.unop(idx, Lang::I8x16Bitmask), - Operator::I8x16NarrowI16x8S => self.binop(idx, 
Lang::I8x16NarrowI16x8S), - Operator::I8x16NarrowI16x8U => self.binop(idx, Lang::I8x16NarrowI16x8U), - Operator::I8x16Shl => self.binop(idx, Lang::I8x16Shl), - Operator::I8x16ShrS => self.binop(idx, Lang::I8x16ShrS), - Operator::I8x16ShrU => self.binop(idx, Lang::I8x16ShrU), - Operator::I8x16Add => self.binop(idx, Lang::I8x16Add), - Operator::I8x16AddSatS => self.binop(idx, Lang::I8x16AddSatS), - Operator::I8x16AddSatU => self.binop(idx, Lang::I8x16AddSatU), - Operator::I8x16Sub => self.binop(idx, Lang::I8x16Sub), - Operator::I8x16SubSatS => self.binop(idx, Lang::I8x16SubSatS), - Operator::I8x16SubSatU => self.binop(idx, Lang::I8x16SubSatU), - Operator::I8x16MinS => self.binop(idx, Lang::I8x16MinS), - Operator::I8x16MinU => self.binop(idx, Lang::I8x16MinU), - Operator::I8x16MaxS => self.binop(idx, Lang::I8x16MaxS), - Operator::I8x16MaxU => self.binop(idx, Lang::I8x16MaxU), - Operator::I8x16AvgrU => self.binop(idx, Lang::I8x16AvgrU), - - Operator::I16x8ExtAddPairwiseI8x16S => { + Op::Simd(SimdOp::I8x16Splat) => self.unop(idx, Lang::I8x16Splat), + Op::Simd(SimdOp::I16x8Splat) => self.unop(idx, Lang::I16x8Splat), + Op::Simd(SimdOp::I32x4Splat) => self.unop(idx, Lang::I32x4Splat), + Op::Simd(SimdOp::I64x2Splat) => self.unop(idx, Lang::I64x2Splat), + Op::Simd(SimdOp::F32x4Splat) => self.unop(idx, Lang::F32x4Splat), + Op::Simd(SimdOp::F64x2Splat) => self.unop(idx, Lang::F64x2Splat), + + Op::Simd(SimdOp::I8x16Eq) => self.binop(idx, Lang::I8x16Eq), + Op::Simd(SimdOp::I8x16Ne) => self.binop(idx, Lang::I8x16Ne), + Op::Simd(SimdOp::I8x16LtS) => self.binop(idx, Lang::I8x16LtS), + Op::Simd(SimdOp::I8x16LtU) => self.binop(idx, Lang::I8x16LtU), + Op::Simd(SimdOp::I8x16GtS) => self.binop(idx, Lang::I8x16GtS), + Op::Simd(SimdOp::I8x16GtU) => self.binop(idx, Lang::I8x16GtU), + Op::Simd(SimdOp::I8x16LeS) => self.binop(idx, Lang::I8x16LeS), + Op::Simd(SimdOp::I8x16LeU) => self.binop(idx, Lang::I8x16LeU), + Op::Simd(SimdOp::I8x16GeS) => self.binop(idx, Lang::I8x16GeS), + 
Op::Simd(SimdOp::I8x16GeU) => self.binop(idx, Lang::I8x16GeU), + Op::Simd(SimdOp::I16x8Eq) => self.binop(idx, Lang::I16x8Eq), + Op::Simd(SimdOp::I16x8Ne) => self.binop(idx, Lang::I16x8Ne), + Op::Simd(SimdOp::I16x8LtS) => self.binop(idx, Lang::I16x8LtS), + Op::Simd(SimdOp::I16x8LtU) => self.binop(idx, Lang::I16x8LtU), + Op::Simd(SimdOp::I16x8GtS) => self.binop(idx, Lang::I16x8GtS), + Op::Simd(SimdOp::I16x8GtU) => self.binop(idx, Lang::I16x8GtU), + Op::Simd(SimdOp::I16x8LeS) => self.binop(idx, Lang::I16x8LeS), + Op::Simd(SimdOp::I16x8LeU) => self.binop(idx, Lang::I16x8LeU), + Op::Simd(SimdOp::I16x8GeS) => self.binop(idx, Lang::I16x8GeS), + Op::Simd(SimdOp::I16x8GeU) => self.binop(idx, Lang::I16x8GeU), + Op::Simd(SimdOp::I32x4Eq) => self.binop(idx, Lang::I32x4Eq), + Op::Simd(SimdOp::I32x4Ne) => self.binop(idx, Lang::I32x4Ne), + Op::Simd(SimdOp::I32x4LtS) => self.binop(idx, Lang::I32x4LtS), + Op::Simd(SimdOp::I32x4LtU) => self.binop(idx, Lang::I32x4LtU), + Op::Simd(SimdOp::I32x4GtS) => self.binop(idx, Lang::I32x4GtS), + Op::Simd(SimdOp::I32x4GtU) => self.binop(idx, Lang::I32x4GtU), + Op::Simd(SimdOp::I32x4LeS) => self.binop(idx, Lang::I32x4LeS), + Op::Simd(SimdOp::I32x4LeU) => self.binop(idx, Lang::I32x4LeU), + Op::Simd(SimdOp::I32x4GeS) => self.binop(idx, Lang::I32x4GeS), + Op::Simd(SimdOp::I32x4GeU) => self.binop(idx, Lang::I32x4GeU), + Op::Simd(SimdOp::I64x2Eq) => self.binop(idx, Lang::I64x2Eq), + Op::Simd(SimdOp::I64x2Ne) => self.binop(idx, Lang::I64x2Ne), + Op::Simd(SimdOp::I64x2LtS) => self.binop(idx, Lang::I64x2LtS), + Op::Simd(SimdOp::I64x2GtS) => self.binop(idx, Lang::I64x2GtS), + Op::Simd(SimdOp::I64x2LeS) => self.binop(idx, Lang::I64x2LeS), + Op::Simd(SimdOp::I64x2GeS) => self.binop(idx, Lang::I64x2GeS), + Op::Simd(SimdOp::F32x4Eq) => self.binop(idx, Lang::F32x4Eq), + Op::Simd(SimdOp::F32x4Ne) => self.binop(idx, Lang::F32x4Ne), + Op::Simd(SimdOp::F32x4Lt) => self.binop(idx, Lang::F32x4Lt), + Op::Simd(SimdOp::F32x4Gt) => self.binop(idx, Lang::F32x4Gt), + 
Op::Simd(SimdOp::F32x4Le) => self.binop(idx, Lang::F32x4Le), + Op::Simd(SimdOp::F32x4Ge) => self.binop(idx, Lang::F32x4Ge), + Op::Simd(SimdOp::F64x2Eq) => self.binop(idx, Lang::F64x2Eq), + Op::Simd(SimdOp::F64x2Ne) => self.binop(idx, Lang::F64x2Ne), + Op::Simd(SimdOp::F64x2Lt) => self.binop(idx, Lang::F64x2Lt), + Op::Simd(SimdOp::F64x2Gt) => self.binop(idx, Lang::F64x2Gt), + Op::Simd(SimdOp::F64x2Le) => self.binop(idx, Lang::F64x2Le), + Op::Simd(SimdOp::F64x2Ge) => self.binop(idx, Lang::F64x2Ge), + + Op::Simd(SimdOp::I8x16Abs) => self.unop(idx, Lang::I8x16Abs), + Op::Simd(SimdOp::I8x16Neg) => self.unop(idx, Lang::I8x16Neg), + Op::Simd(SimdOp::I8x16Popcnt) => self.unop(idx, Lang::I8x16Popcnt), + Op::Simd(SimdOp::I8x16AllTrue) => self.unop(idx, Lang::I8x16AllTrue), + Op::Simd(SimdOp::I8x16Bitmask) => self.unop(idx, Lang::I8x16Bitmask), + Op::Simd(SimdOp::I8x16NarrowI16x8S) => self.binop(idx, Lang::I8x16NarrowI16x8S), + Op::Simd(SimdOp::I8x16NarrowI16x8U) => self.binop(idx, Lang::I8x16NarrowI16x8U), + Op::Simd(SimdOp::I8x16Shl) => self.binop(idx, Lang::I8x16Shl), + Op::Simd(SimdOp::I8x16ShrS) => self.binop(idx, Lang::I8x16ShrS), + Op::Simd(SimdOp::I8x16ShrU) => self.binop(idx, Lang::I8x16ShrU), + Op::Simd(SimdOp::I8x16Add) => self.binop(idx, Lang::I8x16Add), + Op::Simd(SimdOp::I8x16AddSatS) => self.binop(idx, Lang::I8x16AddSatS), + Op::Simd(SimdOp::I8x16AddSatU) => self.binop(idx, Lang::I8x16AddSatU), + Op::Simd(SimdOp::I8x16Sub) => self.binop(idx, Lang::I8x16Sub), + Op::Simd(SimdOp::I8x16SubSatS) => self.binop(idx, Lang::I8x16SubSatS), + Op::Simd(SimdOp::I8x16SubSatU) => self.binop(idx, Lang::I8x16SubSatU), + Op::Simd(SimdOp::I8x16MinS) => self.binop(idx, Lang::I8x16MinS), + Op::Simd(SimdOp::I8x16MinU) => self.binop(idx, Lang::I8x16MinU), + Op::Simd(SimdOp::I8x16MaxS) => self.binop(idx, Lang::I8x16MaxS), + Op::Simd(SimdOp::I8x16MaxU) => self.binop(idx, Lang::I8x16MaxU), + Op::Simd(SimdOp::I8x16AvgrU) => self.binop(idx, Lang::I8x16AvgrU), + + 
Op::Simd(SimdOp::I16x8ExtAddPairwiseI8x16S) => { self.unop(idx, Lang::I16x8ExtAddPairwiseI8x16S) } - Operator::I16x8ExtAddPairwiseI8x16U => { + Op::Simd(SimdOp::I16x8ExtAddPairwiseI8x16U) => { self.unop(idx, Lang::I16x8ExtAddPairwiseI8x16U) } - Operator::I16x8Abs => self.unop(idx, Lang::I16x8Abs), - Operator::I16x8Neg => self.unop(idx, Lang::I16x8Neg), - Operator::I16x8Q15MulrSatS => self.binop(idx, Lang::I16x8Q15MulrSatS), - Operator::I16x8AllTrue => self.unop(idx, Lang::I16x8AllTrue), - Operator::I16x8Bitmask => self.unop(idx, Lang::I16x8Bitmask), - Operator::I16x8NarrowI32x4S => self.binop(idx, Lang::I16x8NarrowI32x4S), - Operator::I16x8NarrowI32x4U => self.binop(idx, Lang::I16x8NarrowI32x4U), - Operator::I16x8ExtendLowI8x16S => self.unop(idx, Lang::I16x8ExtendLowI8x16S), - Operator::I16x8ExtendHighI8x16S => self.unop(idx, Lang::I16x8ExtendHighI8x16S), - Operator::I16x8ExtendLowI8x16U => self.unop(idx, Lang::I16x8ExtendLowI8x16U), - Operator::I16x8ExtendHighI8x16U => self.unop(idx, Lang::I16x8ExtendHighI8x16U), - Operator::I16x8Shl => self.binop(idx, Lang::I16x8Shl), - Operator::I16x8ShrS => self.binop(idx, Lang::I16x8ShrS), - Operator::I16x8ShrU => self.binop(idx, Lang::I16x8ShrU), - Operator::I16x8Add => self.binop(idx, Lang::I16x8Add), - Operator::I16x8AddSatS => self.binop(idx, Lang::I16x8AddSatS), - Operator::I16x8AddSatU => self.binop(idx, Lang::I16x8AddSatU), - Operator::I16x8Sub => self.binop(idx, Lang::I16x8Sub), - Operator::I16x8SubSatS => self.binop(idx, Lang::I16x8SubSatS), - Operator::I16x8SubSatU => self.binop(idx, Lang::I16x8SubSatU), - Operator::I16x8Mul => self.binop(idx, Lang::I16x8Mul), - Operator::I16x8MinS => self.binop(idx, Lang::I16x8MinS), - Operator::I16x8MinU => self.binop(idx, Lang::I16x8MinU), - Operator::I16x8MaxS => self.binop(idx, Lang::I16x8MaxS), - Operator::I16x8MaxU => self.binop(idx, Lang::I16x8MaxU), - Operator::I16x8AvgrU => self.binop(idx, Lang::I16x8AvgrU), - Operator::I16x8ExtMulLowI8x16S => self.binop(idx, 
Lang::I16x8ExtMulLowI8x16S), - Operator::I16x8ExtMulHighI8x16S => self.binop(idx, Lang::I16x8ExtMulHighI8x16S), - Operator::I16x8ExtMulLowI8x16U => self.binop(idx, Lang::I16x8ExtMulLowI8x16U), - Operator::I16x8ExtMulHighI8x16U => self.binop(idx, Lang::I16x8ExtMulHighI8x16U), - - Operator::I32x4ExtAddPairwiseI16x8S => { + Op::Simd(SimdOp::I16x8Abs) => self.unop(idx, Lang::I16x8Abs), + Op::Simd(SimdOp::I16x8Neg) => self.unop(idx, Lang::I16x8Neg), + Op::Simd(SimdOp::I16x8Q15MulrSatS) => self.binop(idx, Lang::I16x8Q15MulrSatS), + Op::Simd(SimdOp::I16x8AllTrue) => self.unop(idx, Lang::I16x8AllTrue), + Op::Simd(SimdOp::I16x8Bitmask) => self.unop(idx, Lang::I16x8Bitmask), + Op::Simd(SimdOp::I16x8NarrowI32x4S) => self.binop(idx, Lang::I16x8NarrowI32x4S), + Op::Simd(SimdOp::I16x8NarrowI32x4U) => self.binop(idx, Lang::I16x8NarrowI32x4U), + Op::Simd(SimdOp::I16x8ExtendLowI8x16S) => self.unop(idx, Lang::I16x8ExtendLowI8x16S), + Op::Simd(SimdOp::I16x8ExtendHighI8x16S) => self.unop(idx, Lang::I16x8ExtendHighI8x16S), + Op::Simd(SimdOp::I16x8ExtendLowI8x16U) => self.unop(idx, Lang::I16x8ExtendLowI8x16U), + Op::Simd(SimdOp::I16x8ExtendHighI8x16U) => self.unop(idx, Lang::I16x8ExtendHighI8x16U), + Op::Simd(SimdOp::I16x8Shl) => self.binop(idx, Lang::I16x8Shl), + Op::Simd(SimdOp::I16x8ShrS) => self.binop(idx, Lang::I16x8ShrS), + Op::Simd(SimdOp::I16x8ShrU) => self.binop(idx, Lang::I16x8ShrU), + Op::Simd(SimdOp::I16x8Add) => self.binop(idx, Lang::I16x8Add), + Op::Simd(SimdOp::I16x8AddSatS) => self.binop(idx, Lang::I16x8AddSatS), + Op::Simd(SimdOp::I16x8AddSatU) => self.binop(idx, Lang::I16x8AddSatU), + Op::Simd(SimdOp::I16x8Sub) => self.binop(idx, Lang::I16x8Sub), + Op::Simd(SimdOp::I16x8SubSatS) => self.binop(idx, Lang::I16x8SubSatS), + Op::Simd(SimdOp::I16x8SubSatU) => self.binop(idx, Lang::I16x8SubSatU), + Op::Simd(SimdOp::I16x8Mul) => self.binop(idx, Lang::I16x8Mul), + Op::Simd(SimdOp::I16x8MinS) => self.binop(idx, Lang::I16x8MinS), + Op::Simd(SimdOp::I16x8MinU) => self.binop(idx, 
Lang::I16x8MinU), + Op::Simd(SimdOp::I16x8MaxS) => self.binop(idx, Lang::I16x8MaxS), + Op::Simd(SimdOp::I16x8MaxU) => self.binop(idx, Lang::I16x8MaxU), + Op::Simd(SimdOp::I16x8AvgrU) => self.binop(idx, Lang::I16x8AvgrU), + Op::Simd(SimdOp::I16x8ExtMulLowI8x16S) => self.binop(idx, Lang::I16x8ExtMulLowI8x16S), + Op::Simd(SimdOp::I16x8ExtMulHighI8x16S) => self.binop(idx, Lang::I16x8ExtMulHighI8x16S), + Op::Simd(SimdOp::I16x8ExtMulLowI8x16U) => self.binop(idx, Lang::I16x8ExtMulLowI8x16U), + Op::Simd(SimdOp::I16x8ExtMulHighI8x16U) => self.binop(idx, Lang::I16x8ExtMulHighI8x16U), + + Op::Simd(SimdOp::I32x4ExtAddPairwiseI16x8S) => { self.unop(idx, Lang::I32x4ExtAddPairwiseI16x8S) } - Operator::I32x4ExtAddPairwiseI16x8U => { + Op::Simd(SimdOp::I32x4ExtAddPairwiseI16x8U) => { self.unop(idx, Lang::I32x4ExtAddPairwiseI16x8U) } - Operator::I32x4Abs => self.unop(idx, Lang::I32x4Abs), - Operator::I32x4Neg => self.unop(idx, Lang::I32x4Neg), - Operator::I32x4AllTrue => self.unop(idx, Lang::I32x4AllTrue), - Operator::I32x4Bitmask => self.unop(idx, Lang::I32x4Bitmask), - Operator::I32x4ExtendLowI16x8S => self.unop(idx, Lang::I32x4ExtendLowI16x8S), - Operator::I32x4ExtendHighI16x8S => self.unop(idx, Lang::I32x4ExtendHighI16x8S), - Operator::I32x4ExtendLowI16x8U => self.unop(idx, Lang::I32x4ExtendLowI16x8U), - Operator::I32x4ExtendHighI16x8U => self.unop(idx, Lang::I32x4ExtendHighI16x8U), - Operator::I32x4Shl => self.binop(idx, Lang::I32x4Shl), - Operator::I32x4ShrS => self.binop(idx, Lang::I32x4ShrS), - Operator::I32x4ShrU => self.binop(idx, Lang::I32x4ShrU), - Operator::I32x4Add => self.binop(idx, Lang::I32x4Add), - Operator::I32x4Sub => self.binop(idx, Lang::I32x4Sub), - Operator::I32x4Mul => self.binop(idx, Lang::I32x4Mul), - Operator::I32x4MinS => self.binop(idx, Lang::I32x4MinS), - Operator::I32x4MinU => self.binop(idx, Lang::I32x4MinU), - Operator::I32x4MaxS => self.binop(idx, Lang::I32x4MaxS), - Operator::I32x4MaxU => self.binop(idx, Lang::I32x4MaxU), - 
Operator::I32x4DotI16x8S => self.binop(idx, Lang::I32x4DotI16x8S), - Operator::I32x4ExtMulLowI16x8S => self.binop(idx, Lang::I32x4ExtMulLowI16x8S), - Operator::I32x4ExtMulHighI16x8S => self.binop(idx, Lang::I32x4ExtMulHighI16x8S), - Operator::I32x4ExtMulLowI16x8U => self.binop(idx, Lang::I32x4ExtMulLowI16x8U), - Operator::I32x4ExtMulHighI16x8U => self.binop(idx, Lang::I32x4ExtMulHighI16x8U), - - Operator::I64x2Abs => self.unop(idx, Lang::I64x2Abs), - Operator::I64x2Neg => self.unop(idx, Lang::I64x2Neg), - Operator::I64x2AllTrue => self.unop(idx, Lang::I64x2AllTrue), - Operator::I64x2Bitmask => self.unop(idx, Lang::I64x2Bitmask), - Operator::I64x2ExtendLowI32x4S => self.unop(idx, Lang::I64x2ExtendLowI32x4S), - Operator::I64x2ExtendHighI32x4S => self.unop(idx, Lang::I64x2ExtendHighI32x4S), - Operator::I64x2ExtendLowI32x4U => self.unop(idx, Lang::I64x2ExtendLowI32x4U), - Operator::I64x2ExtendHighI32x4U => self.unop(idx, Lang::I64x2ExtendHighI32x4U), - Operator::I64x2Shl => self.binop(idx, Lang::I64x2Shl), - Operator::I64x2ShrS => self.binop(idx, Lang::I64x2ShrS), - Operator::I64x2ShrU => self.binop(idx, Lang::I64x2ShrU), - Operator::I64x2Add => self.binop(idx, Lang::I64x2Add), - Operator::I64x2Sub => self.binop(idx, Lang::I64x2Sub), - Operator::I64x2Mul => self.binop(idx, Lang::I64x2Mul), - Operator::I64x2ExtMulLowI32x4S => self.binop(idx, Lang::I64x2ExtMulLowI32x4S), - Operator::I64x2ExtMulHighI32x4S => self.binop(idx, Lang::I64x2ExtMulHighI32x4S), - Operator::I64x2ExtMulLowI32x4U => self.binop(idx, Lang::I64x2ExtMulLowI32x4U), - Operator::I64x2ExtMulHighI32x4U => self.binop(idx, Lang::I64x2ExtMulHighI32x4U), - - Operator::F32x4Ceil => self.unop(idx, Lang::F32x4Ceil), - Operator::F32x4Floor => self.unop(idx, Lang::F32x4Floor), - Operator::F32x4Trunc => self.unop(idx, Lang::F32x4Trunc), - Operator::F32x4Nearest => self.unop(idx, Lang::F32x4Nearest), - Operator::F32x4Abs => self.unop(idx, Lang::F32x4Abs), - Operator::F32x4Neg => self.unop(idx, Lang::F32x4Neg), - 
Operator::F32x4Sqrt => self.unop(idx, Lang::F32x4Sqrt), - Operator::F32x4Add => self.binop(idx, Lang::F32x4Add), - Operator::F32x4Sub => self.binop(idx, Lang::F32x4Sub), - Operator::F32x4Mul => self.binop(idx, Lang::F32x4Mul), - Operator::F32x4Div => self.binop(idx, Lang::F32x4Div), - Operator::F32x4Min => self.binop(idx, Lang::F32x4Min), - Operator::F32x4Max => self.binop(idx, Lang::F32x4Max), - Operator::F32x4PMin => self.binop(idx, Lang::F32x4PMin), - Operator::F32x4PMax => self.binop(idx, Lang::F32x4PMax), - Operator::F64x2Ceil => self.unop(idx, Lang::F64x2Ceil), - Operator::F64x2Floor => self.unop(idx, Lang::F64x2Floor), - Operator::F64x2Trunc => self.unop(idx, Lang::F64x2Trunc), - Operator::F64x2Nearest => self.unop(idx, Lang::F64x2Nearest), - Operator::F64x2Abs => self.unop(idx, Lang::F64x2Abs), - Operator::F64x2Neg => self.unop(idx, Lang::F64x2Neg), - Operator::F64x2Sqrt => self.unop(idx, Lang::F64x2Sqrt), - Operator::F64x2Add => self.binop(idx, Lang::F64x2Add), - Operator::F64x2Sub => self.binop(idx, Lang::F64x2Sub), - Operator::F64x2Mul => self.binop(idx, Lang::F64x2Mul), - Operator::F64x2Div => self.binop(idx, Lang::F64x2Div), - Operator::F64x2Min => self.binop(idx, Lang::F64x2Min), - Operator::F64x2Max => self.binop(idx, Lang::F64x2Max), - Operator::F64x2PMin => self.binop(idx, Lang::F64x2PMin), - Operator::F64x2PMax => self.binop(idx, Lang::F64x2PMax), - - Operator::I32x4TruncSatF32x4S => self.unop(idx, Lang::I32x4TruncSatF32x4S), - Operator::I32x4TruncSatF32x4U => self.unop(idx, Lang::I32x4TruncSatF32x4U), - Operator::F32x4ConvertI32x4S => self.unop(idx, Lang::F32x4ConvertI32x4S), - Operator::F32x4ConvertI32x4U => self.unop(idx, Lang::F32x4ConvertI32x4U), - Operator::I32x4TruncSatF64x2SZero => self.unop(idx, Lang::I32x4TruncSatF64x2SZero), - Operator::I32x4TruncSatF64x2UZero => self.unop(idx, Lang::I32x4TruncSatF64x2UZero), - Operator::F64x2ConvertLowI32x4S => self.unop(idx, Lang::F64x2ConvertLowI32x4S), - Operator::F64x2ConvertLowI32x4U => 
self.unop(idx, Lang::F64x2ConvertLowI32x4U), - Operator::F32x4DemoteF64x2Zero => self.unop(idx, Lang::F32x4DemoteF64x2Zero), - Operator::F64x2PromoteLowF32x4 => self.unop(idx, Lang::F64x2PromoteLowF32x4), + Op::Simd(SimdOp::I32x4Abs) => self.unop(idx, Lang::I32x4Abs), + Op::Simd(SimdOp::I32x4Neg) => self.unop(idx, Lang::I32x4Neg), + Op::Simd(SimdOp::I32x4AllTrue) => self.unop(idx, Lang::I32x4AllTrue), + Op::Simd(SimdOp::I32x4Bitmask) => self.unop(idx, Lang::I32x4Bitmask), + Op::Simd(SimdOp::I32x4ExtendLowI16x8S) => self.unop(idx, Lang::I32x4ExtendLowI16x8S), + Op::Simd(SimdOp::I32x4ExtendHighI16x8S) => self.unop(idx, Lang::I32x4ExtendHighI16x8S), + Op::Simd(SimdOp::I32x4ExtendLowI16x8U) => self.unop(idx, Lang::I32x4ExtendLowI16x8U), + Op::Simd(SimdOp::I32x4ExtendHighI16x8U) => self.unop(idx, Lang::I32x4ExtendHighI16x8U), + Op::Simd(SimdOp::I32x4Shl) => self.binop(idx, Lang::I32x4Shl), + Op::Simd(SimdOp::I32x4ShrS) => self.binop(idx, Lang::I32x4ShrS), + Op::Simd(SimdOp::I32x4ShrU) => self.binop(idx, Lang::I32x4ShrU), + Op::Simd(SimdOp::I32x4Add) => self.binop(idx, Lang::I32x4Add), + Op::Simd(SimdOp::I32x4Sub) => self.binop(idx, Lang::I32x4Sub), + Op::Simd(SimdOp::I32x4Mul) => self.binop(idx, Lang::I32x4Mul), + Op::Simd(SimdOp::I32x4MinS) => self.binop(idx, Lang::I32x4MinS), + Op::Simd(SimdOp::I32x4MinU) => self.binop(idx, Lang::I32x4MinU), + Op::Simd(SimdOp::I32x4MaxS) => self.binop(idx, Lang::I32x4MaxS), + Op::Simd(SimdOp::I32x4MaxU) => self.binop(idx, Lang::I32x4MaxU), + Op::Simd(SimdOp::I32x4DotI16x8S) => self.binop(idx, Lang::I32x4DotI16x8S), + Op::Simd(SimdOp::I32x4ExtMulLowI16x8S) => self.binop(idx, Lang::I32x4ExtMulLowI16x8S), + Op::Simd(SimdOp::I32x4ExtMulHighI16x8S) => self.binop(idx, Lang::I32x4ExtMulHighI16x8S), + Op::Simd(SimdOp::I32x4ExtMulLowI16x8U) => self.binop(idx, Lang::I32x4ExtMulLowI16x8U), + Op::Simd(SimdOp::I32x4ExtMulHighI16x8U) => self.binop(idx, Lang::I32x4ExtMulHighI16x8U), + + Op::Simd(SimdOp::I64x2Abs) => self.unop(idx, Lang::I64x2Abs), + 
Op::Simd(SimdOp::I64x2Neg) => self.unop(idx, Lang::I64x2Neg), + Op::Simd(SimdOp::I64x2AllTrue) => self.unop(idx, Lang::I64x2AllTrue), + Op::Simd(SimdOp::I64x2Bitmask) => self.unop(idx, Lang::I64x2Bitmask), + Op::Simd(SimdOp::I64x2ExtendLowI32x4S) => self.unop(idx, Lang::I64x2ExtendLowI32x4S), + Op::Simd(SimdOp::I64x2ExtendHighI32x4S) => self.unop(idx, Lang::I64x2ExtendHighI32x4S), + Op::Simd(SimdOp::I64x2ExtendLowI32x4U) => self.unop(idx, Lang::I64x2ExtendLowI32x4U), + Op::Simd(SimdOp::I64x2ExtendHighI32x4U) => self.unop(idx, Lang::I64x2ExtendHighI32x4U), + Op::Simd(SimdOp::I64x2Shl) => self.binop(idx, Lang::I64x2Shl), + Op::Simd(SimdOp::I64x2ShrS) => self.binop(idx, Lang::I64x2ShrS), + Op::Simd(SimdOp::I64x2ShrU) => self.binop(idx, Lang::I64x2ShrU), + Op::Simd(SimdOp::I64x2Add) => self.binop(idx, Lang::I64x2Add), + Op::Simd(SimdOp::I64x2Sub) => self.binop(idx, Lang::I64x2Sub), + Op::Simd(SimdOp::I64x2Mul) => self.binop(idx, Lang::I64x2Mul), + Op::Simd(SimdOp::I64x2ExtMulLowI32x4S) => self.binop(idx, Lang::I64x2ExtMulLowI32x4S), + Op::Simd(SimdOp::I64x2ExtMulHighI32x4S) => self.binop(idx, Lang::I64x2ExtMulHighI32x4S), + Op::Simd(SimdOp::I64x2ExtMulLowI32x4U) => self.binop(idx, Lang::I64x2ExtMulLowI32x4U), + Op::Simd(SimdOp::I64x2ExtMulHighI32x4U) => self.binop(idx, Lang::I64x2ExtMulHighI32x4U), + + Op::Simd(SimdOp::F32x4Ceil) => self.unop(idx, Lang::F32x4Ceil), + Op::Simd(SimdOp::F32x4Floor) => self.unop(idx, Lang::F32x4Floor), + Op::Simd(SimdOp::F32x4Trunc) => self.unop(idx, Lang::F32x4Trunc), + Op::Simd(SimdOp::F32x4Nearest) => self.unop(idx, Lang::F32x4Nearest), + Op::Simd(SimdOp::F32x4Abs) => self.unop(idx, Lang::F32x4Abs), + Op::Simd(SimdOp::F32x4Neg) => self.unop(idx, Lang::F32x4Neg), + Op::Simd(SimdOp::F32x4Sqrt) => self.unop(idx, Lang::F32x4Sqrt), + Op::Simd(SimdOp::F32x4Add) => self.binop(idx, Lang::F32x4Add), + Op::Simd(SimdOp::F32x4Sub) => self.binop(idx, Lang::F32x4Sub), + Op::Simd(SimdOp::F32x4Mul) => self.binop(idx, Lang::F32x4Mul), + 
Op::Simd(SimdOp::F32x4Div) => self.binop(idx, Lang::F32x4Div), + Op::Simd(SimdOp::F32x4Min) => self.binop(idx, Lang::F32x4Min), + Op::Simd(SimdOp::F32x4Max) => self.binop(idx, Lang::F32x4Max), + Op::Simd(SimdOp::F32x4PMin) => self.binop(idx, Lang::F32x4PMin), + Op::Simd(SimdOp::F32x4PMax) => self.binop(idx, Lang::F32x4PMax), + Op::Simd(SimdOp::F64x2Ceil) => self.unop(idx, Lang::F64x2Ceil), + Op::Simd(SimdOp::F64x2Floor) => self.unop(idx, Lang::F64x2Floor), + Op::Simd(SimdOp::F64x2Trunc) => self.unop(idx, Lang::F64x2Trunc), + Op::Simd(SimdOp::F64x2Nearest) => self.unop(idx, Lang::F64x2Nearest), + Op::Simd(SimdOp::F64x2Abs) => self.unop(idx, Lang::F64x2Abs), + Op::Simd(SimdOp::F64x2Neg) => self.unop(idx, Lang::F64x2Neg), + Op::Simd(SimdOp::F64x2Sqrt) => self.unop(idx, Lang::F64x2Sqrt), + Op::Simd(SimdOp::F64x2Add) => self.binop(idx, Lang::F64x2Add), + Op::Simd(SimdOp::F64x2Sub) => self.binop(idx, Lang::F64x2Sub), + Op::Simd(SimdOp::F64x2Mul) => self.binop(idx, Lang::F64x2Mul), + Op::Simd(SimdOp::F64x2Div) => self.binop(idx, Lang::F64x2Div), + Op::Simd(SimdOp::F64x2Min) => self.binop(idx, Lang::F64x2Min), + Op::Simd(SimdOp::F64x2Max) => self.binop(idx, Lang::F64x2Max), + Op::Simd(SimdOp::F64x2PMin) => self.binop(idx, Lang::F64x2PMin), + Op::Simd(SimdOp::F64x2PMax) => self.binop(idx, Lang::F64x2PMax), + + Op::Simd(SimdOp::I32x4TruncSatF32x4S) => self.unop(idx, Lang::I32x4TruncSatF32x4S), + Op::Simd(SimdOp::I32x4TruncSatF32x4U) => self.unop(idx, Lang::I32x4TruncSatF32x4U), + Op::Simd(SimdOp::F32x4ConvertI32x4S) => self.unop(idx, Lang::F32x4ConvertI32x4S), + Op::Simd(SimdOp::F32x4ConvertI32x4U) => self.unop(idx, Lang::F32x4ConvertI32x4U), + Op::Simd(SimdOp::I32x4TruncSatF64x2SZero) => self.unop(idx, Lang::I32x4TruncSatF64x2SZero), + Op::Simd(SimdOp::I32x4TruncSatF64x2UZero) => self.unop(idx, Lang::I32x4TruncSatF64x2UZero), + Op::Simd(SimdOp::F64x2ConvertLowI32x4S) => self.unop(idx, Lang::F64x2ConvertLowI32x4S), + Op::Simd(SimdOp::F64x2ConvertLowI32x4U) => self.unop(idx, 
Lang::F64x2ConvertLowI32x4U), + Op::Simd(SimdOp::F32x4DemoteF64x2Zero) => self.unop(idx, Lang::F32x4DemoteF64x2Zero), + Op::Simd(SimdOp::F64x2PromoteLowF32x4) => self.unop(idx, Lang::F64x2PromoteLowF32x4), op => { // If the operator is not implemented, warn and bail out. We From 76d4d6a5e152ed8a9a0991b0e26e24e77e6898ac Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Wed, 20 Nov 2024 00:26:17 +0100 Subject: [PATCH 37/83] apply rustfmt --- .../wasm-mutate/src/mutators/peephole/dfg.rs | 156 +++++++++++++----- 1 file changed, 117 insertions(+), 39 deletions(-) diff --git a/crates/wasm-mutate/src/mutators/peephole/dfg.rs b/crates/wasm-mutate/src/mutators/peephole/dfg.rs index 20df4279b2..e0a2a3d368 100644 --- a/crates/wasm-mutate/src/mutators/peephole/dfg.rs +++ b/crates/wasm-mutate/src/mutators/peephole/dfg.rs @@ -780,13 +780,27 @@ impl<'a> DFGBuilder { } Op::Simd(SimdOp::V128Load { memarg }) => self.load(idx, memarg, Lang::V128Load), - Op::Simd(SimdOp::V128Load8x8S { memarg }) => self.load(idx, memarg, Lang::V128Load8x8S), - Op::Simd(SimdOp::V128Load8x8U { memarg }) => self.load(idx, memarg, Lang::V128Load8x8U), - Op::Simd(SimdOp::V128Load16x4S { memarg }) => self.load(idx, memarg, Lang::V128Load16x4S), - Op::Simd(SimdOp::V128Load16x4U { memarg }) => self.load(idx, memarg, Lang::V128Load16x4U), - Op::Simd(SimdOp::V128Load32x2S { memarg }) => self.load(idx, memarg, Lang::V128Load32x2S), - Op::Simd(SimdOp::V128Load32x2U { memarg }) => self.load(idx, memarg, Lang::V128Load32x2U), - Op::Simd(SimdOp::V128Load8Splat { memarg }) => self.load(idx, memarg, Lang::V128Load8Splat), + Op::Simd(SimdOp::V128Load8x8S { memarg }) => { + self.load(idx, memarg, Lang::V128Load8x8S) + } + Op::Simd(SimdOp::V128Load8x8U { memarg }) => { + self.load(idx, memarg, Lang::V128Load8x8U) + } + Op::Simd(SimdOp::V128Load16x4S { memarg }) => { + self.load(idx, memarg, Lang::V128Load16x4S) + } + Op::Simd(SimdOp::V128Load16x4U { memarg }) => { + self.load(idx, memarg, Lang::V128Load16x4U) + } + 
Op::Simd(SimdOp::V128Load32x2S { memarg }) => { + self.load(idx, memarg, Lang::V128Load32x2S) + } + Op::Simd(SimdOp::V128Load32x2U { memarg }) => { + self.load(idx, memarg, Lang::V128Load32x2U) + } + Op::Simd(SimdOp::V128Load8Splat { memarg }) => { + self.load(idx, memarg, Lang::V128Load8Splat) + } Op::Simd(SimdOp::V128Load16Splat { memarg }) => { self.load(idx, memarg, Lang::V128Load16Splat) } @@ -796,8 +810,12 @@ impl<'a> DFGBuilder { Op::Simd(SimdOp::V128Load64Splat { memarg }) => { self.load(idx, memarg, Lang::V128Load64Splat) } - Op::Simd(SimdOp::V128Load32Zero { memarg }) => self.load(idx, memarg, Lang::V128Load32Zero), - Op::Simd(SimdOp::V128Load64Zero { memarg }) => self.load(idx, memarg, Lang::V128Load64Zero), + Op::Simd(SimdOp::V128Load32Zero { memarg }) => { + self.load(idx, memarg, Lang::V128Load32Zero) + } + Op::Simd(SimdOp::V128Load64Zero { memarg }) => { + self.load(idx, memarg, Lang::V128Load64Zero) + } Op::Simd(SimdOp::V128Store { memarg }) => self.store(idx, memarg, Lang::V128Store), Op::Simd(SimdOp::V128Load8Lane { memarg, lane }) => { self.load_lane(idx, memarg, lane, Lang::V128Load8Lane) @@ -964,10 +982,18 @@ impl<'a> DFGBuilder { Op::Simd(SimdOp::I16x8Bitmask) => self.unop(idx, Lang::I16x8Bitmask), Op::Simd(SimdOp::I16x8NarrowI32x4S) => self.binop(idx, Lang::I16x8NarrowI32x4S), Op::Simd(SimdOp::I16x8NarrowI32x4U) => self.binop(idx, Lang::I16x8NarrowI32x4U), - Op::Simd(SimdOp::I16x8ExtendLowI8x16S) => self.unop(idx, Lang::I16x8ExtendLowI8x16S), - Op::Simd(SimdOp::I16x8ExtendHighI8x16S) => self.unop(idx, Lang::I16x8ExtendHighI8x16S), - Op::Simd(SimdOp::I16x8ExtendLowI8x16U) => self.unop(idx, Lang::I16x8ExtendLowI8x16U), - Op::Simd(SimdOp::I16x8ExtendHighI8x16U) => self.unop(idx, Lang::I16x8ExtendHighI8x16U), + Op::Simd(SimdOp::I16x8ExtendLowI8x16S) => { + self.unop(idx, Lang::I16x8ExtendLowI8x16S) + } + Op::Simd(SimdOp::I16x8ExtendHighI8x16S) => { + self.unop(idx, Lang::I16x8ExtendHighI8x16S) + } + Op::Simd(SimdOp::I16x8ExtendLowI8x16U) => { + 
self.unop(idx, Lang::I16x8ExtendLowI8x16U) + } + Op::Simd(SimdOp::I16x8ExtendHighI8x16U) => { + self.unop(idx, Lang::I16x8ExtendHighI8x16U) + } Op::Simd(SimdOp::I16x8Shl) => self.binop(idx, Lang::I16x8Shl), Op::Simd(SimdOp::I16x8ShrS) => self.binop(idx, Lang::I16x8ShrS), Op::Simd(SimdOp::I16x8ShrU) => self.binop(idx, Lang::I16x8ShrU), @@ -983,10 +1009,18 @@ impl<'a> DFGBuilder { Op::Simd(SimdOp::I16x8MaxS) => self.binop(idx, Lang::I16x8MaxS), Op::Simd(SimdOp::I16x8MaxU) => self.binop(idx, Lang::I16x8MaxU), Op::Simd(SimdOp::I16x8AvgrU) => self.binop(idx, Lang::I16x8AvgrU), - Op::Simd(SimdOp::I16x8ExtMulLowI8x16S) => self.binop(idx, Lang::I16x8ExtMulLowI8x16S), - Op::Simd(SimdOp::I16x8ExtMulHighI8x16S) => self.binop(idx, Lang::I16x8ExtMulHighI8x16S), - Op::Simd(SimdOp::I16x8ExtMulLowI8x16U) => self.binop(idx, Lang::I16x8ExtMulLowI8x16U), - Op::Simd(SimdOp::I16x8ExtMulHighI8x16U) => self.binop(idx, Lang::I16x8ExtMulHighI8x16U), + Op::Simd(SimdOp::I16x8ExtMulLowI8x16S) => { + self.binop(idx, Lang::I16x8ExtMulLowI8x16S) + } + Op::Simd(SimdOp::I16x8ExtMulHighI8x16S) => { + self.binop(idx, Lang::I16x8ExtMulHighI8x16S) + } + Op::Simd(SimdOp::I16x8ExtMulLowI8x16U) => { + self.binop(idx, Lang::I16x8ExtMulLowI8x16U) + } + Op::Simd(SimdOp::I16x8ExtMulHighI8x16U) => { + self.binop(idx, Lang::I16x8ExtMulHighI8x16U) + } Op::Simd(SimdOp::I32x4ExtAddPairwiseI16x8S) => { self.unop(idx, Lang::I32x4ExtAddPairwiseI16x8S) @@ -998,10 +1032,18 @@ impl<'a> DFGBuilder { Op::Simd(SimdOp::I32x4Neg) => self.unop(idx, Lang::I32x4Neg), Op::Simd(SimdOp::I32x4AllTrue) => self.unop(idx, Lang::I32x4AllTrue), Op::Simd(SimdOp::I32x4Bitmask) => self.unop(idx, Lang::I32x4Bitmask), - Op::Simd(SimdOp::I32x4ExtendLowI16x8S) => self.unop(idx, Lang::I32x4ExtendLowI16x8S), - Op::Simd(SimdOp::I32x4ExtendHighI16x8S) => self.unop(idx, Lang::I32x4ExtendHighI16x8S), - Op::Simd(SimdOp::I32x4ExtendLowI16x8U) => self.unop(idx, Lang::I32x4ExtendLowI16x8U), - Op::Simd(SimdOp::I32x4ExtendHighI16x8U) => self.unop(idx, 
Lang::I32x4ExtendHighI16x8U), + Op::Simd(SimdOp::I32x4ExtendLowI16x8S) => { + self.unop(idx, Lang::I32x4ExtendLowI16x8S) + } + Op::Simd(SimdOp::I32x4ExtendHighI16x8S) => { + self.unop(idx, Lang::I32x4ExtendHighI16x8S) + } + Op::Simd(SimdOp::I32x4ExtendLowI16x8U) => { + self.unop(idx, Lang::I32x4ExtendLowI16x8U) + } + Op::Simd(SimdOp::I32x4ExtendHighI16x8U) => { + self.unop(idx, Lang::I32x4ExtendHighI16x8U) + } Op::Simd(SimdOp::I32x4Shl) => self.binop(idx, Lang::I32x4Shl), Op::Simd(SimdOp::I32x4ShrS) => self.binop(idx, Lang::I32x4ShrS), Op::Simd(SimdOp::I32x4ShrU) => self.binop(idx, Lang::I32x4ShrU), @@ -1013,29 +1055,53 @@ impl<'a> DFGBuilder { Op::Simd(SimdOp::I32x4MaxS) => self.binop(idx, Lang::I32x4MaxS), Op::Simd(SimdOp::I32x4MaxU) => self.binop(idx, Lang::I32x4MaxU), Op::Simd(SimdOp::I32x4DotI16x8S) => self.binop(idx, Lang::I32x4DotI16x8S), - Op::Simd(SimdOp::I32x4ExtMulLowI16x8S) => self.binop(idx, Lang::I32x4ExtMulLowI16x8S), - Op::Simd(SimdOp::I32x4ExtMulHighI16x8S) => self.binop(idx, Lang::I32x4ExtMulHighI16x8S), - Op::Simd(SimdOp::I32x4ExtMulLowI16x8U) => self.binop(idx, Lang::I32x4ExtMulLowI16x8U), - Op::Simd(SimdOp::I32x4ExtMulHighI16x8U) => self.binop(idx, Lang::I32x4ExtMulHighI16x8U), + Op::Simd(SimdOp::I32x4ExtMulLowI16x8S) => { + self.binop(idx, Lang::I32x4ExtMulLowI16x8S) + } + Op::Simd(SimdOp::I32x4ExtMulHighI16x8S) => { + self.binop(idx, Lang::I32x4ExtMulHighI16x8S) + } + Op::Simd(SimdOp::I32x4ExtMulLowI16x8U) => { + self.binop(idx, Lang::I32x4ExtMulLowI16x8U) + } + Op::Simd(SimdOp::I32x4ExtMulHighI16x8U) => { + self.binop(idx, Lang::I32x4ExtMulHighI16x8U) + } Op::Simd(SimdOp::I64x2Abs) => self.unop(idx, Lang::I64x2Abs), Op::Simd(SimdOp::I64x2Neg) => self.unop(idx, Lang::I64x2Neg), Op::Simd(SimdOp::I64x2AllTrue) => self.unop(idx, Lang::I64x2AllTrue), Op::Simd(SimdOp::I64x2Bitmask) => self.unop(idx, Lang::I64x2Bitmask), - Op::Simd(SimdOp::I64x2ExtendLowI32x4S) => self.unop(idx, Lang::I64x2ExtendLowI32x4S), - Op::Simd(SimdOp::I64x2ExtendHighI32x4S) 
=> self.unop(idx, Lang::I64x2ExtendHighI32x4S), - Op::Simd(SimdOp::I64x2ExtendLowI32x4U) => self.unop(idx, Lang::I64x2ExtendLowI32x4U), - Op::Simd(SimdOp::I64x2ExtendHighI32x4U) => self.unop(idx, Lang::I64x2ExtendHighI32x4U), + Op::Simd(SimdOp::I64x2ExtendLowI32x4S) => { + self.unop(idx, Lang::I64x2ExtendLowI32x4S) + } + Op::Simd(SimdOp::I64x2ExtendHighI32x4S) => { + self.unop(idx, Lang::I64x2ExtendHighI32x4S) + } + Op::Simd(SimdOp::I64x2ExtendLowI32x4U) => { + self.unop(idx, Lang::I64x2ExtendLowI32x4U) + } + Op::Simd(SimdOp::I64x2ExtendHighI32x4U) => { + self.unop(idx, Lang::I64x2ExtendHighI32x4U) + } Op::Simd(SimdOp::I64x2Shl) => self.binop(idx, Lang::I64x2Shl), Op::Simd(SimdOp::I64x2ShrS) => self.binop(idx, Lang::I64x2ShrS), Op::Simd(SimdOp::I64x2ShrU) => self.binop(idx, Lang::I64x2ShrU), Op::Simd(SimdOp::I64x2Add) => self.binop(idx, Lang::I64x2Add), Op::Simd(SimdOp::I64x2Sub) => self.binop(idx, Lang::I64x2Sub), Op::Simd(SimdOp::I64x2Mul) => self.binop(idx, Lang::I64x2Mul), - Op::Simd(SimdOp::I64x2ExtMulLowI32x4S) => self.binop(idx, Lang::I64x2ExtMulLowI32x4S), - Op::Simd(SimdOp::I64x2ExtMulHighI32x4S) => self.binop(idx, Lang::I64x2ExtMulHighI32x4S), - Op::Simd(SimdOp::I64x2ExtMulLowI32x4U) => self.binop(idx, Lang::I64x2ExtMulLowI32x4U), - Op::Simd(SimdOp::I64x2ExtMulHighI32x4U) => self.binop(idx, Lang::I64x2ExtMulHighI32x4U), + Op::Simd(SimdOp::I64x2ExtMulLowI32x4S) => { + self.binop(idx, Lang::I64x2ExtMulLowI32x4S) + } + Op::Simd(SimdOp::I64x2ExtMulHighI32x4S) => { + self.binop(idx, Lang::I64x2ExtMulHighI32x4S) + } + Op::Simd(SimdOp::I64x2ExtMulLowI32x4U) => { + self.binop(idx, Lang::I64x2ExtMulLowI32x4U) + } + Op::Simd(SimdOp::I64x2ExtMulHighI32x4U) => { + self.binop(idx, Lang::I64x2ExtMulHighI32x4U) + } Op::Simd(SimdOp::F32x4Ceil) => self.unop(idx, Lang::F32x4Ceil), Op::Simd(SimdOp::F32x4Floor) => self.unop(idx, Lang::F32x4Floor), @@ -1072,12 +1138,24 @@ impl<'a> DFGBuilder { Op::Simd(SimdOp::I32x4TruncSatF32x4U) => self.unop(idx, Lang::I32x4TruncSatF32x4U), 
Op::Simd(SimdOp::F32x4ConvertI32x4S) => self.unop(idx, Lang::F32x4ConvertI32x4S), Op::Simd(SimdOp::F32x4ConvertI32x4U) => self.unop(idx, Lang::F32x4ConvertI32x4U), - Op::Simd(SimdOp::I32x4TruncSatF64x2SZero) => self.unop(idx, Lang::I32x4TruncSatF64x2SZero), - Op::Simd(SimdOp::I32x4TruncSatF64x2UZero) => self.unop(idx, Lang::I32x4TruncSatF64x2UZero), - Op::Simd(SimdOp::F64x2ConvertLowI32x4S) => self.unop(idx, Lang::F64x2ConvertLowI32x4S), - Op::Simd(SimdOp::F64x2ConvertLowI32x4U) => self.unop(idx, Lang::F64x2ConvertLowI32x4U), - Op::Simd(SimdOp::F32x4DemoteF64x2Zero) => self.unop(idx, Lang::F32x4DemoteF64x2Zero), - Op::Simd(SimdOp::F64x2PromoteLowF32x4) => self.unop(idx, Lang::F64x2PromoteLowF32x4), + Op::Simd(SimdOp::I32x4TruncSatF64x2SZero) => { + self.unop(idx, Lang::I32x4TruncSatF64x2SZero) + } + Op::Simd(SimdOp::I32x4TruncSatF64x2UZero) => { + self.unop(idx, Lang::I32x4TruncSatF64x2UZero) + } + Op::Simd(SimdOp::F64x2ConvertLowI32x4S) => { + self.unop(idx, Lang::F64x2ConvertLowI32x4S) + } + Op::Simd(SimdOp::F64x2ConvertLowI32x4U) => { + self.unop(idx, Lang::F64x2ConvertLowI32x4U) + } + Op::Simd(SimdOp::F32x4DemoteF64x2Zero) => { + self.unop(idx, Lang::F32x4DemoteF64x2Zero) + } + Op::Simd(SimdOp::F64x2PromoteLowF32x4) => { + self.unop(idx, Lang::F64x2PromoteLowF32x4) + } op => { // If the operator is not implemented, warn and bail out. 
We From 7c7eb4674dad499fb4ee42806ccdc9cdb04eb2ab Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Wed, 20 Nov 2024 00:32:02 +0100 Subject: [PATCH 38/83] fix dev-dependencies for wasm-mutate --- crates/wasm-mutate/Cargo.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/wasm-mutate/Cargo.toml b/crates/wasm-mutate/Cargo.toml index a280018e63..f9b4c520dd 100644 --- a/crates/wasm-mutate/Cargo.toml +++ b/crates/wasm-mutate/Cargo.toml @@ -13,8 +13,8 @@ workspace = true [dependencies] clap = { workspace = true, optional = true } thiserror = "1.0.28" -wasmparser = { workspace = true, features = ["simd"]} -wasm-encoder = { workspace = true, features = ["wasmparser", "simd"] } +wasmparser = { workspace = true, features = ['simd']} +wasm-encoder = { workspace = true, features = ['wasmparser', 'simd'] } rand = { workspace = true } log = { workspace = true } egg = "0.6.0" @@ -24,4 +24,4 @@ anyhow = { workspace = true } wat = { workspace = true } wasmprinter = { workspace = true } env_logger = { workspace = true } -wasmparser = { workspace = true, features = ['validate', 'features'] } +wasmparser = { workspace = true, features = ['validate', 'features', 'simd'] } From 73fb3ba7aa1d9a5410f68c8fb6386f8929d6aed6 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Wed, 20 Nov 2024 00:37:56 +0100 Subject: [PATCH 39/83] add simd_visitor impl to NopVisitor --- crates/wasmparser/benches/benchmark.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/crates/wasmparser/benches/benchmark.rs b/crates/wasmparser/benches/benchmark.rs index af2bc7d84f..16500b95b2 100644 --- a/crates/wasmparser/benches/benchmark.rs +++ b/crates/wasmparser/benches/benchmark.rs @@ -365,6 +365,10 @@ macro_rules! 
define_visit_operator { impl<'a> VisitOperator<'a> for NopVisit { type Output = (); + fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator<'a, Output = Self::Output>> { + Some(self) + } + wasmparser::for_each_operator!(define_visit_operator); } From a458b3aa7f6df86ad2c33055d3379ce520bd3058 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Wed, 20 Nov 2024 00:50:28 +0100 Subject: [PATCH 40/83] add missing VisitSimdOperator impl for WasmProposalValidator --- crates/wasmparser/src/validator/operators.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/crates/wasmparser/src/validator/operators.rs b/crates/wasmparser/src/validator/operators.rs index 651373edeb..92a6cb70a6 100644 --- a/crates/wasmparser/src/validator/operators.rs +++ b/crates/wasmparser/src/validator/operators.rs @@ -1755,9 +1755,22 @@ where { type Output = Result<()>; + #[cfg(feature = "simd")] + fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator<'a, Output = Self::Output>> { + Some(self) + } + for_each_operator!(validate_proposal); } +#[cfg(feature = "simd")] +impl<'a, T> VisitSimdOperator<'a> for WasmProposalValidator<'_, '_, T> +where + T: WasmModuleResources, +{ + crate::for_each_simd_operator!(validate_proposal); +} + #[track_caller] #[inline] fn debug_assert_type_indices_are_ids(ty: ValType) { From 89249231d680964a7e5d9e943d688aad5b337fe8 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Wed, 20 Nov 2024 01:20:53 +0100 Subject: [PATCH 41/83] mark doc example as compile_fail --- crates/wasmparser/src/readers/core/operators.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/wasmparser/src/readers/core/operators.rs b/crates/wasmparser/src/readers/core/operators.rs index d774eebb81..f6f5a68a01 100644 --- a/crates/wasmparser/src/readers/core/operators.rs +++ b/crates/wasmparser/src/readers/core/operators.rs @@ -469,7 +469,7 @@ pub trait VisitOperator<'a> { /// /// # Example /// - /// ``` + /// ```compile_fail /// impl VisitOperator for 
MyVisitor { /// fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator<'a, Output = Self::Output>> { /// Some(self) From 8884f9502bb566280fa61387219765ea42998e2b Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Wed, 20 Nov 2024 01:21:10 +0100 Subject: [PATCH 42/83] add simd support for VisitConstOperator --- crates/wasmparser/src/validator/core.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/crates/wasmparser/src/validator/core.rs b/crates/wasmparser/src/validator/core.rs index 4f2deb0629..dc12034a42 100644 --- a/crates/wasmparser/src/validator/core.rs +++ b/crates/wasmparser/src/validator/core.rs @@ -16,6 +16,8 @@ use crate::{ TableInit, TableType, TagType, TypeRef, UnpackedIndex, ValType, VisitOperator, WasmFeatures, WasmModuleResources, }; +#[cfg(feature = "simd")] +use crate::VisitSimdOperator; use crate::{prelude::*, CompositeInnerType}; use alloc::sync::Arc; use core::mem; @@ -431,7 +433,7 @@ impl ModuleState { $self.validator().visit_f64_const($val) }}; (@visit $self:ident visit_v128_const $val:ident) => {{ - $self.validator().visit_v128_const($val) + $self.validator().simd_visitor().unwrap().visit_v128_const($val) }}; (@visit $self:ident visit_ref_null $val:ident) => {{ $self.validator().visit_ref_null($val) @@ -522,8 +524,17 @@ impl ModuleState { impl<'a> VisitOperator<'a> for VisitConstOperator<'a> { type Output = Result<()>; + fn simd_visitor(&mut self) -> Option<&mut dyn crate::VisitSimdOperator<'a, Output = Self::Output>> { + Some(self) + } + for_each_operator!(define_visit_operator); } + + #[cfg(feature = "simd")] + impl<'a> VisitSimdOperator<'a> for VisitConstOperator<'a> { + crate::for_each_simd_operator!(define_visit_operator); + } } } From 0beba9e7879f497080427bde44d6e7c1d2a73992 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Wed, 20 Nov 2024 01:21:45 +0100 Subject: [PATCH 43/83] use wasmparser/simd in validate tool --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/Cargo.toml b/Cargo.toml index 5665df9686..cb5fcc73ed 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -122,7 +122,7 @@ wat = { workspace = true, features = ['dwarf', 'component-model'] } termcolor = { workspace = true } # Dependencies of `validate` -wasmparser = { workspace = true, optional = true, features = ['component-model'] } +wasmparser = { workspace = true, optional = true, features = ['component-model', 'simd'] } rayon = { workspace = true, optional = true } bitflags = { workspace = true, optional = true } From 351e16971d8879504bda80fcb4d32193e93e03c5 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Wed, 20 Nov 2024 01:22:02 +0100 Subject: [PATCH 44/83] add simd crate feature propagation to wasm-tools CLI tool --- Cargo.toml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index cb5fcc73ed..297405dd06 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -203,6 +203,7 @@ default = [ 'compose', 'demangle', 'component', + 'simd', 'metadata', 'wit-smith', 'addr2line', @@ -239,6 +240,11 @@ component = [ 'dep:wasmparser', 'dep:serde_json', ] +simd = [ + 'wasmparser/simd', + 'wasmprinter/simd', + 'wasm-encoder/simd', +] metadata = ['dep:wasmparser', 'wasm-metadata', 'dep:serde_json'] wit-smith = ['dep:wit-smith', 'arbitrary'] addr2line = ['dep:addr2line', 'dep:gimli', 'dep:wasmparser'] From 04f34abea45f98ee34daf8f14b3dc73c6c6b1c30 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Wed, 20 Nov 2024 01:22:20 +0100 Subject: [PATCH 45/83] wasm-smith: use wasmparser/simd crate feature --- crates/wasm-smith/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/wasm-smith/Cargo.toml b/crates/wasm-smith/Cargo.toml index f7275ff381..f432ffac88 100644 --- a/crates/wasm-smith/Cargo.toml +++ b/crates/wasm-smith/Cargo.toml @@ -32,13 +32,13 @@ leb128 = { workspace = true } serde = { workspace = true, optional = true } serde_derive = { workspace = true, optional = true } wasm-encoder = { workspace = true } -wasmparser = { 
workspace = true, optional = true, features = ['validate', 'features'] } +wasmparser = { workspace = true, optional = true, features = ['validate', 'features', 'simd'] } wat = { workspace = true, optional = true } [dev-dependencies] criterion = { workspace = true } rand = { workspace = true } -wasmparser = { workspace = true, features = ["validate", "features"] } +wasmparser = { workspace = true, features = ['validate', 'features', 'simd'] } wasmprinter = { workspace = true } wat = { workspace = true } From c419be724028c00712b659cdfd5b3b12fb4abc48 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Wed, 20 Nov 2024 01:22:41 +0100 Subject: [PATCH 46/83] apply rustfmt --- crates/wasmparser/src/validator/core.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/crates/wasmparser/src/validator/core.rs b/crates/wasmparser/src/validator/core.rs index dc12034a42..86b9edf75c 100644 --- a/crates/wasmparser/src/validator/core.rs +++ b/crates/wasmparser/src/validator/core.rs @@ -10,14 +10,14 @@ use super::{ operators::{ty_to_str, OperatorValidator, OperatorValidatorAllocations}, types::{CoreTypeId, EntityType, RecGroupId, TypeAlloc, TypeList}, }; +#[cfg(feature = "simd")] +use crate::VisitSimdOperator; use crate::{ limits::*, BinaryReaderError, ConstExpr, Data, DataKind, Element, ElementKind, ExternalKind, FuncType, Global, GlobalType, HeapType, MemoryType, RecGroup, RefType, Result, SubType, Table, TableInit, TableType, TagType, TypeRef, UnpackedIndex, ValType, VisitOperator, WasmFeatures, WasmModuleResources, }; -#[cfg(feature = "simd")] -use crate::VisitSimdOperator; use crate::{prelude::*, CompositeInnerType}; use alloc::sync::Arc; use core::mem; @@ -524,7 +524,9 @@ impl ModuleState { impl<'a> VisitOperator<'a> for VisitConstOperator<'a> { type Output = Result<()>; - fn simd_visitor(&mut self) -> Option<&mut dyn crate::VisitSimdOperator<'a, Output = Self::Output>> { + fn simd_visitor( + &mut self, + ) -> Option<&mut dyn crate::VisitSimdOperator<'a, 
Output = Self::Output>> { Some(self) } From 5c1eeb4cefa992686f9e989fc1bc4693ffb505c6 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Wed, 20 Nov 2024 01:26:18 +0100 Subject: [PATCH 47/83] feature gate simd_visitor impl --- crates/wasmparser/src/validator/core.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/wasmparser/src/validator/core.rs b/crates/wasmparser/src/validator/core.rs index 86b9edf75c..b6fb1d0f5d 100644 --- a/crates/wasmparser/src/validator/core.rs +++ b/crates/wasmparser/src/validator/core.rs @@ -524,6 +524,7 @@ impl ModuleState { impl<'a> VisitOperator<'a> for VisitConstOperator<'a> { type Output = Result<()>; + #[cfg(feature = "simd")] fn simd_visitor( &mut self, ) -> Option<&mut dyn crate::VisitSimdOperator<'a, Output = Self::Output>> { From cec6bb743c19b271339aa662995deea459284c28 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Thu, 21 Nov 2024 23:34:23 +0100 Subject: [PATCH 48/83] unconditionally enable simd for wasmprinter --- Cargo.toml | 1 - crates/wasmprinter/Cargo.toml | 5 ++--- crates/wasmprinter/src/operator.rs | 5 ----- 3 files changed, 2 insertions(+), 9 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 297405dd06..d3082850b8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -242,7 +242,6 @@ component = [ ] simd = [ 'wasmparser/simd', - 'wasmprinter/simd', 'wasm-encoder/simd', ] metadata = ['dep:wasmparser', 'wasm-metadata', 'dep:serde_json'] diff --git a/crates/wasmprinter/Cargo.toml b/crates/wasmprinter/Cargo.toml index faf62bcc4d..de8f3807a5 100644 --- a/crates/wasmprinter/Cargo.toml +++ b/crates/wasmprinter/Cargo.toml @@ -18,13 +18,12 @@ workspace = true [dependencies] anyhow = { workspace = true } -wasmparser = { workspace = true, features = ['std'] } +wasmparser = { workspace = true, features = ['std', 'simd'] } termcolor = { workspace = true } [dev-dependencies] wat = { path = "../wat" } [features] -default = ['component-model', 'simd'] +default = ['component-model'] component-model = 
['wasmparser/component-model'] -simd = ['wasmparser/simd'] diff --git a/crates/wasmprinter/src/operator.rs b/crates/wasmprinter/src/operator.rs index 7b3ea3f9e0..2c2a4a5371 100644 --- a/crates/wasmprinter/src/operator.rs +++ b/crates/wasmprinter/src/operator.rs @@ -1,7 +1,6 @@ use super::{Config, Print, PrintTermcolor, Printer, State}; use anyhow::{anyhow, bail, Result}; use termcolor::{Ansi, NoColor}; -#[cfg(feature = "simd")] use wasmparser::VisitSimdOperator; use wasmparser::{ BinaryReader, BlockType, BrTable, Catch, CompositeInnerType, ContType, FrameKind, FuncType, @@ -387,13 +386,11 @@ impl<'printer, 'state, 'a, 'b> PrintOperator<'printer, 'state, 'a, 'b> { self.printer.print_idx(&self.state.core.element_names, idx) } - #[cfg(feature = "simd")] fn lane(&mut self, lane: u8) -> Result<()> { write!(self.result(), " {lane}")?; Ok(()) } - #[cfg(feature = "simd")] fn lanes(&mut self, lanes: [u8; 16]) -> Result<()> { for lane in lanes.iter() { write!(self.result(), " {lane}")?; @@ -1393,7 +1390,6 @@ macro_rules! 
define_visit { impl<'a> VisitOperator<'a> for PrintOperator<'_, '_, '_, '_> { type Output = Result<()>; - #[cfg(feature = "simd")] fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator<'a, Output = Self::Output>> { Some(self) } @@ -1401,7 +1397,6 @@ impl<'a> VisitOperator<'a> for PrintOperator<'_, '_, '_, '_> { wasmparser::for_each_operator!(define_visit); } -#[cfg(feature = "simd")] impl<'a> VisitSimdOperator<'a> for PrintOperator<'_, '_, '_, '_> { wasmparser::for_each_simd_operator!(define_visit); } From 1dfbf6e53b99a10d74c16db00c3bbce40417a298 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Thu, 21 Nov 2024 23:36:38 +0100 Subject: [PATCH 49/83] unconditionally enable simd for wasm-encoder --- Cargo.toml | 1 - crates/wasm-encoder/Cargo.toml | 6 ++---- crates/wasm-encoder/src/reencode.rs | 2 -- crates/wasm-mutate/Cargo.toml | 2 +- 4 files changed, 3 insertions(+), 8 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index d3082850b8..d07cfa87ee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -242,7 +242,6 @@ component = [ ] simd = [ 'wasmparser/simd', - 'wasm-encoder/simd', ] metadata = ['dep:wasmparser', 'wasm-metadata', 'dep:serde_json'] wit-smith = ['dep:wit-smith', 'arbitrary'] diff --git a/crates/wasm-encoder/Cargo.toml b/crates/wasm-encoder/Cargo.toml index e5ce3b6141..25d3ee53e0 100644 --- a/crates/wasm-encoder/Cargo.toml +++ b/crates/wasm-encoder/Cargo.toml @@ -29,14 +29,12 @@ wasmparser = { optional = true, workspace = true } [dev-dependencies] anyhow = { workspace = true } tempfile = "3.2.0" -wasmparser = { path = "../wasmparser" } +wasmparser = { path = "../wasmparser", features = ["simd"] } wasmprinter = { workspace = true } [features] -default = ['component-model', 'simd'] +default = ['component-model'] # On-by-default: conditional support for emitting components in addition to # core modules. component-model = ['wasmparser?/component-model'] -# On-by-default: conditional support for emitting SIMD wasm operators. 
-simd = ['wasmparser?/simd'] diff --git a/crates/wasm-encoder/src/reencode.rs b/crates/wasm-encoder/src/reencode.rs index 30972eefbf..299df44301 100644 --- a/crates/wasm-encoder/src/reencode.rs +++ b/crates/wasm-encoder/src/reencode.rs @@ -1636,7 +1636,6 @@ pub mod utils { translate_build!(reencoder $op $($($arg)*)?) } )* - #[cfg(feature = "simd")] wasmparser::Operator::Simd(simd_arg) => simd_instruction(reencoder, simd_arg)?, unexpected => unreachable!("encountered unexpected Wasm operator: {unexpected:?}"), }) @@ -1646,7 +1645,6 @@ pub mod utils { wasmparser::for_each_operator!(translate) } - #[cfg(feature = "simd")] fn simd_instruction<'a, T: ?Sized + Reencode>( reencoder: &mut T, arg: wasmparser::SimdOperator, diff --git a/crates/wasm-mutate/Cargo.toml b/crates/wasm-mutate/Cargo.toml index f9b4c520dd..90854ef851 100644 --- a/crates/wasm-mutate/Cargo.toml +++ b/crates/wasm-mutate/Cargo.toml @@ -14,7 +14,7 @@ workspace = true clap = { workspace = true, optional = true } thiserror = "1.0.28" wasmparser = { workspace = true, features = ['simd']} -wasm-encoder = { workspace = true, features = ['wasmparser', 'simd'] } +wasm-encoder = { workspace = true, features = ['wasmparser'] } rand = { workspace = true } log = { workspace = true } egg = "0.6.0" From a98227f9f3bf34dfaae8ae3e6abdb844c3c78fa9 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Thu, 21 Nov 2024 23:38:12 +0100 Subject: [PATCH 50/83] remove wasm-tools simd feature (enable by default) --- Cargo.toml | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index d07cfa87ee..6ec1471b63 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -102,7 +102,7 @@ wasm-metadata = { version = "0.220.0", path = "crates/wasm-metadata" } wasm-mutate = { version = "0.220.0", path = "crates/wasm-mutate" } wasm-shrink = { version = "0.220.0", path = "crates/wasm-shrink" } wasm-smith = { version = "0.220.0", path = "crates/wasm-smith" } -wasmparser = { version = "0.220.0", path = 
"crates/wasmparser", default-features = false, features = ['std'] } +wasmparser = { version = "0.220.0", path = "crates/wasmparser", default-features = false, features = ['std','simd'] } wasmprinter = { version = "0.220.0", path = "crates/wasmprinter", default-features = false } wast = { version = "220.0.0", path = "crates/wast", default-features = false } wat = { version = "1.220.0", path = "crates/wat", default-features = false } @@ -203,7 +203,6 @@ default = [ 'compose', 'demangle', 'component', - 'simd', 'metadata', 'wit-smith', 'addr2line', @@ -240,9 +239,6 @@ component = [ 'dep:wasmparser', 'dep:serde_json', ] -simd = [ - 'wasmparser/simd', -] metadata = ['dep:wasmparser', 'wasm-metadata', 'dep:serde_json'] wit-smith = ['dep:wit-smith', 'arbitrary'] addr2line = ['dep:addr2line', 'dep:gimli', 'dep:wasmparser'] From efa3d4324b4451631560db8c21bc88e4a44cf816 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Fri, 22 Nov 2024 00:37:57 +0100 Subject: [PATCH 51/83] use macros generate for_each_operator macros - This does not yet provide a for_each_operator macro in for !simd mode but that should be easily added. - This is WIP in that is currently does not handle the documentation of the macros well. We might need to also generate the macro docs in the macros themselves. --- crates/wasmparser/src/for_each_simd_op.rs | 291 ------------------ crates/wasmparser/src/lib.rs | 355 +++++++++++++++++++++- 2 files changed, 353 insertions(+), 293 deletions(-) delete mode 100644 crates/wasmparser/src/for_each_simd_op.rs diff --git a/crates/wasmparser/src/for_each_simd_op.rs b/crates/wasmparser/src/for_each_simd_op.rs deleted file mode 100644 index ca883ce6ed..0000000000 --- a/crates/wasmparser/src/for_each_simd_op.rs +++ /dev/null @@ -1,291 +0,0 @@ -/// A helper macro to conveniently iterate over all opcodes recognized by this -/// crate. 
This can be used to work with either the [`SimdOperator`] enumeration or -/// the [`VisitSimdOperator`] trait if your use case uniformly handles all operators -/// the same way. -/// -/// The list of specializable Wasm proposals is as follows: -/// -/// - `@simd`: [Wasm `simd` proposal] -/// - `@relaxed_simd`: [Wasm `relaxed-simd` proposal] -/// -/// For more information about the structure and use of this macro please -/// refer to the documentation of the [`for_each_operator`] macro. -/// -/// [Wasm `simd` proposal]: -/// https://github.com/webassembly/simd -/// -/// [Wasm `relaxed-simd` proposal]: -/// https://github.com/WebAssembly/relaxed-simd -/// -/// [`SimdOperator`]: crate::SimdOperator -/// [`VisitSimdOperator`]: crate::VisitSimdOperator -#[macro_export] -macro_rules! for_each_simd_operator { - ($mac:ident) => { - $mac! { - // 0xFD operators - // 128-bit SIMD - // - https://github.com/webassembly/simd - // - https://webassembly.github.io/simd/core/binary/instructions.html - @simd V128Load { memarg: $crate::MemArg } => visit_v128_load (load v128) - @simd V128Load8x8S { memarg: $crate::MemArg } => visit_v128_load8x8_s (load v128) - @simd V128Load8x8U { memarg: $crate::MemArg } => visit_v128_load8x8_u (load v128) - @simd V128Load16x4S { memarg: $crate::MemArg } => visit_v128_load16x4_s (load v128) - @simd V128Load16x4U { memarg: $crate::MemArg } => visit_v128_load16x4_u (load v128) - @simd V128Load32x2S { memarg: $crate::MemArg } => visit_v128_load32x2_s (load v128) - @simd V128Load32x2U { memarg: $crate::MemArg } => visit_v128_load32x2_u (load v128) - @simd V128Load8Splat { memarg: $crate::MemArg } => visit_v128_load8_splat (load v128) - @simd V128Load16Splat { memarg: $crate::MemArg } => visit_v128_load16_splat (load v128) - @simd V128Load32Splat { memarg: $crate::MemArg } => visit_v128_load32_splat (load v128) - @simd V128Load64Splat { memarg: $crate::MemArg } => visit_v128_load64_splat (load v128) - @simd V128Load32Zero { memarg: $crate::MemArg } => 
visit_v128_load32_zero (load v128) - @simd V128Load64Zero { memarg: $crate::MemArg } => visit_v128_load64_zero (load v128) - @simd V128Store { memarg: $crate::MemArg } => visit_v128_store (store v128) - @simd V128Load8Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load8_lane (load lane 16) - @simd V128Load16Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load16_lane (load lane 8) - @simd V128Load32Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load32_lane (load lane 4) - @simd V128Load64Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load64_lane (load lane 2) - @simd V128Store8Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_store8_lane (store lane 16) - @simd V128Store16Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_store16_lane (store lane 8) - @simd V128Store32Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_store32_lane (store lane 4) - @simd V128Store64Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_store64_lane (store lane 2) - @simd V128Const { value: $crate::V128 } => visit_v128_const (push v128) - @simd I8x16Shuffle { lanes: [u8; 16] } => visit_i8x16_shuffle (arity 2 -> 1) - @simd I8x16ExtractLaneS { lane: u8 } => visit_i8x16_extract_lane_s (extract i32 16) - @simd I8x16ExtractLaneU { lane: u8 } => visit_i8x16_extract_lane_u (extract i32 16) - @simd I8x16ReplaceLane { lane: u8 } => visit_i8x16_replace_lane (replace i32 16) - @simd I16x8ExtractLaneS { lane: u8 } => visit_i16x8_extract_lane_s (extract i32 8) - @simd I16x8ExtractLaneU { lane: u8 } => visit_i16x8_extract_lane_u (extract i32 8) - @simd I16x8ReplaceLane { lane: u8 } => visit_i16x8_replace_lane (replace i32 8) - @simd I32x4ExtractLane { lane: u8 } => visit_i32x4_extract_lane (extract i32 4) - @simd I32x4ReplaceLane { lane: u8 } => visit_i32x4_replace_lane (replace i32 4) - @simd I64x2ExtractLane { lane: u8 } => visit_i64x2_extract_lane (extract i64 2) - @simd I64x2ReplaceLane { lane: u8 } => visit_i64x2_replace_lane (replace i64 2) 
- @simd F32x4ExtractLane { lane: u8 } => visit_f32x4_extract_lane (extract f32 4) - @simd F32x4ReplaceLane { lane: u8 } => visit_f32x4_replace_lane (replace f32 4) - @simd F64x2ExtractLane { lane: u8 } => visit_f64x2_extract_lane (extract f64 2) - @simd F64x2ReplaceLane { lane: u8 } => visit_f64x2_replace_lane (replace f64 2) - @simd I8x16Swizzle => visit_i8x16_swizzle (binary v128) - @simd I8x16Splat => visit_i8x16_splat (splat i32) - @simd I16x8Splat => visit_i16x8_splat (splat i32) - @simd I32x4Splat => visit_i32x4_splat (splat i32) - @simd I64x2Splat => visit_i64x2_splat (splat i64) - @simd F32x4Splat => visit_f32x4_splat (splat f32) - @simd F64x2Splat => visit_f64x2_splat (splat f64) - @simd I8x16Eq => visit_i8x16_eq (binary v128) - @simd I8x16Ne => visit_i8x16_ne (binary v128) - @simd I8x16LtS => visit_i8x16_lt_s (binary v128) - @simd I8x16LtU => visit_i8x16_lt_u (binary v128) - @simd I8x16GtS => visit_i8x16_gt_s (binary v128) - @simd I8x16GtU => visit_i8x16_gt_u (binary v128) - @simd I8x16LeS => visit_i8x16_le_s (binary v128) - @simd I8x16LeU => visit_i8x16_le_u (binary v128) - @simd I8x16GeS => visit_i8x16_ge_s (binary v128) - @simd I8x16GeU => visit_i8x16_ge_u (binary v128) - @simd I16x8Eq => visit_i16x8_eq (binary v128) - @simd I16x8Ne => visit_i16x8_ne (binary v128) - @simd I16x8LtS => visit_i16x8_lt_s (binary v128) - @simd I16x8LtU => visit_i16x8_lt_u (binary v128) - @simd I16x8GtS => visit_i16x8_gt_s (binary v128) - @simd I16x8GtU => visit_i16x8_gt_u (binary v128) - @simd I16x8LeS => visit_i16x8_le_s (binary v128) - @simd I16x8LeU => visit_i16x8_le_u (binary v128) - @simd I16x8GeS => visit_i16x8_ge_s (binary v128) - @simd I16x8GeU => visit_i16x8_ge_u (binary v128) - @simd I32x4Eq => visit_i32x4_eq (binary v128) - @simd I32x4Ne => visit_i32x4_ne (binary v128) - @simd I32x4LtS => visit_i32x4_lt_s (binary v128) - @simd I32x4LtU => visit_i32x4_lt_u (binary v128) - @simd I32x4GtS => visit_i32x4_gt_s (binary v128) - @simd I32x4GtU => visit_i32x4_gt_u (binary 
v128) - @simd I32x4LeS => visit_i32x4_le_s (binary v128) - @simd I32x4LeU => visit_i32x4_le_u (binary v128) - @simd I32x4GeS => visit_i32x4_ge_s (binary v128) - @simd I32x4GeU => visit_i32x4_ge_u (binary v128) - @simd I64x2Eq => visit_i64x2_eq (binary v128) - @simd I64x2Ne => visit_i64x2_ne (binary v128) - @simd I64x2LtS => visit_i64x2_lt_s (binary v128) - @simd I64x2GtS => visit_i64x2_gt_s (binary v128) - @simd I64x2LeS => visit_i64x2_le_s (binary v128) - @simd I64x2GeS => visit_i64x2_ge_s (binary v128) - @simd F32x4Eq => visit_f32x4_eq (binary v128f) - @simd F32x4Ne => visit_f32x4_ne (binary v128f) - @simd F32x4Lt => visit_f32x4_lt (binary v128f) - @simd F32x4Gt => visit_f32x4_gt (binary v128f) - @simd F32x4Le => visit_f32x4_le (binary v128f) - @simd F32x4Ge => visit_f32x4_ge (binary v128f) - @simd F64x2Eq => visit_f64x2_eq (binary v128f) - @simd F64x2Ne => visit_f64x2_ne (binary v128f) - @simd F64x2Lt => visit_f64x2_lt (binary v128f) - @simd F64x2Gt => visit_f64x2_gt (binary v128f) - @simd F64x2Le => visit_f64x2_le (binary v128f) - @simd F64x2Ge => visit_f64x2_ge (binary v128f) - @simd V128Not => visit_v128_not (unary v128) - @simd V128And => visit_v128_and (binary v128) - @simd V128AndNot => visit_v128_andnot (binary v128) - @simd V128Or => visit_v128_or (binary v128) - @simd V128Xor => visit_v128_xor (binary v128) - @simd V128Bitselect => visit_v128_bitselect (ternary v128) - @simd V128AnyTrue => visit_v128_any_true (test v128) - @simd I8x16Abs => visit_i8x16_abs (unary v128) - @simd I8x16Neg => visit_i8x16_neg (unary v128) - @simd I8x16Popcnt => visit_i8x16_popcnt (unary v128) - @simd I8x16AllTrue => visit_i8x16_all_true (test v128) - @simd I8x16Bitmask => visit_i8x16_bitmask (test v128) - @simd I8x16NarrowI16x8S => visit_i8x16_narrow_i16x8_s (binary v128) - @simd I8x16NarrowI16x8U => visit_i8x16_narrow_i16x8_u (binary v128) - @simd I8x16Shl => visit_i8x16_shl (shift v128) - @simd I8x16ShrS => visit_i8x16_shr_s (shift v128) - @simd I8x16ShrU => 
visit_i8x16_shr_u (shift v128) - @simd I8x16Add => visit_i8x16_add (binary v128) - @simd I8x16AddSatS => visit_i8x16_add_sat_s (binary v128) - @simd I8x16AddSatU => visit_i8x16_add_sat_u (binary v128) - @simd I8x16Sub => visit_i8x16_sub (binary v128) - @simd I8x16SubSatS => visit_i8x16_sub_sat_s (binary v128) - @simd I8x16SubSatU => visit_i8x16_sub_sat_u (binary v128) - @simd I8x16MinS => visit_i8x16_min_s (binary v128) - @simd I8x16MinU => visit_i8x16_min_u (binary v128) - @simd I8x16MaxS => visit_i8x16_max_s (binary v128) - @simd I8x16MaxU => visit_i8x16_max_u (binary v128) - @simd I8x16AvgrU => visit_i8x16_avgr_u (binary v128) - @simd I16x8ExtAddPairwiseI8x16S => visit_i16x8_extadd_pairwise_i8x16_s (unary v128) - @simd I16x8ExtAddPairwiseI8x16U => visit_i16x8_extadd_pairwise_i8x16_u (unary v128) - @simd I16x8Abs => visit_i16x8_abs (unary v128) - @simd I16x8Neg => visit_i16x8_neg (unary v128) - @simd I16x8Q15MulrSatS => visit_i16x8_q15mulr_sat_s (binary v128) - @simd I16x8AllTrue => visit_i16x8_all_true (test v128) - @simd I16x8Bitmask => visit_i16x8_bitmask (test v128) - @simd I16x8NarrowI32x4S => visit_i16x8_narrow_i32x4_s (binary v128) - @simd I16x8NarrowI32x4U => visit_i16x8_narrow_i32x4_u (binary v128) - @simd I16x8ExtendLowI8x16S => visit_i16x8_extend_low_i8x16_s (unary v128) - @simd I16x8ExtendHighI8x16S => visit_i16x8_extend_high_i8x16_s (unary v128) - @simd I16x8ExtendLowI8x16U => visit_i16x8_extend_low_i8x16_u (unary v128) - @simd I16x8ExtendHighI8x16U => visit_i16x8_extend_high_i8x16_u (unary v128) - @simd I16x8Shl => visit_i16x8_shl (shift v128) - @simd I16x8ShrS => visit_i16x8_shr_s (shift v128) - @simd I16x8ShrU => visit_i16x8_shr_u (shift v128) - @simd I16x8Add => visit_i16x8_add (binary v128) - @simd I16x8AddSatS => visit_i16x8_add_sat_s (binary v128) - @simd I16x8AddSatU => visit_i16x8_add_sat_u (binary v128) - @simd I16x8Sub => visit_i16x8_sub (binary v128) - @simd I16x8SubSatS => visit_i16x8_sub_sat_s (binary v128) - @simd I16x8SubSatU => 
visit_i16x8_sub_sat_u (binary v128) - @simd I16x8Mul => visit_i16x8_mul (binary v128) - @simd I16x8MinS => visit_i16x8_min_s (binary v128) - @simd I16x8MinU => visit_i16x8_min_u (binary v128) - @simd I16x8MaxS => visit_i16x8_max_s (binary v128) - @simd I16x8MaxU => visit_i16x8_max_u (binary v128) - @simd I16x8AvgrU => visit_i16x8_avgr_u (binary v128) - @simd I16x8ExtMulLowI8x16S => visit_i16x8_extmul_low_i8x16_s (binary v128) - @simd I16x8ExtMulHighI8x16S => visit_i16x8_extmul_high_i8x16_s (binary v128) - @simd I16x8ExtMulLowI8x16U => visit_i16x8_extmul_low_i8x16_u (binary v128) - @simd I16x8ExtMulHighI8x16U => visit_i16x8_extmul_high_i8x16_u (binary v128) - @simd I32x4ExtAddPairwiseI16x8S => visit_i32x4_extadd_pairwise_i16x8_s (unary v128) - @simd I32x4ExtAddPairwiseI16x8U => visit_i32x4_extadd_pairwise_i16x8_u (unary v128) - @simd I32x4Abs => visit_i32x4_abs (unary v128) - @simd I32x4Neg => visit_i32x4_neg (unary v128) - @simd I32x4AllTrue => visit_i32x4_all_true (test v128) - @simd I32x4Bitmask => visit_i32x4_bitmask (test v128) - @simd I32x4ExtendLowI16x8S => visit_i32x4_extend_low_i16x8_s (unary v128) - @simd I32x4ExtendHighI16x8S => visit_i32x4_extend_high_i16x8_s (unary v128) - @simd I32x4ExtendLowI16x8U => visit_i32x4_extend_low_i16x8_u (unary v128) - @simd I32x4ExtendHighI16x8U => visit_i32x4_extend_high_i16x8_u (unary v128) - @simd I32x4Shl => visit_i32x4_shl (shift v128) - @simd I32x4ShrS => visit_i32x4_shr_s (shift v128) - @simd I32x4ShrU => visit_i32x4_shr_u (shift v128) - @simd I32x4Add => visit_i32x4_add (binary v128) - @simd I32x4Sub => visit_i32x4_sub (binary v128) - @simd I32x4Mul => visit_i32x4_mul (binary v128) - @simd I32x4MinS => visit_i32x4_min_s (binary v128) - @simd I32x4MinU => visit_i32x4_min_u (binary v128) - @simd I32x4MaxS => visit_i32x4_max_s (binary v128) - @simd I32x4MaxU => visit_i32x4_max_u (binary v128) - @simd I32x4DotI16x8S => visit_i32x4_dot_i16x8_s (binary v128) - @simd I32x4ExtMulLowI16x8S => visit_i32x4_extmul_low_i16x8_s 
(binary v128) - @simd I32x4ExtMulHighI16x8S => visit_i32x4_extmul_high_i16x8_s (binary v128) - @simd I32x4ExtMulLowI16x8U => visit_i32x4_extmul_low_i16x8_u (binary v128) - @simd I32x4ExtMulHighI16x8U => visit_i32x4_extmul_high_i16x8_u (binary v128) - @simd I64x2Abs => visit_i64x2_abs (unary v128) - @simd I64x2Neg => visit_i64x2_neg (unary v128) - @simd I64x2AllTrue => visit_i64x2_all_true (test v128) - @simd I64x2Bitmask => visit_i64x2_bitmask (test v128) - @simd I64x2ExtendLowI32x4S => visit_i64x2_extend_low_i32x4_s (unary v128) - @simd I64x2ExtendHighI32x4S => visit_i64x2_extend_high_i32x4_s (unary v128) - @simd I64x2ExtendLowI32x4U => visit_i64x2_extend_low_i32x4_u (unary v128) - @simd I64x2ExtendHighI32x4U => visit_i64x2_extend_high_i32x4_u (unary v128) - @simd I64x2Shl => visit_i64x2_shl (shift v128) - @simd I64x2ShrS => visit_i64x2_shr_s (shift v128) - @simd I64x2ShrU => visit_i64x2_shr_u (shift v128) - @simd I64x2Add => visit_i64x2_add (binary v128) - @simd I64x2Sub => visit_i64x2_sub (binary v128) - @simd I64x2Mul => visit_i64x2_mul (binary v128) - @simd I64x2ExtMulLowI32x4S => visit_i64x2_extmul_low_i32x4_s (binary v128) - @simd I64x2ExtMulHighI32x4S => visit_i64x2_extmul_high_i32x4_s (binary v128) - @simd I64x2ExtMulLowI32x4U => visit_i64x2_extmul_low_i32x4_u (binary v128) - @simd I64x2ExtMulHighI32x4U => visit_i64x2_extmul_high_i32x4_u (binary v128) - @simd F32x4Ceil => visit_f32x4_ceil (unary v128f) - @simd F32x4Floor => visit_f32x4_floor (unary v128f) - @simd F32x4Trunc => visit_f32x4_trunc (unary v128f) - @simd F32x4Nearest => visit_f32x4_nearest (unary v128f) - @simd F32x4Abs => visit_f32x4_abs (unary v128f) - @simd F32x4Neg => visit_f32x4_neg (unary v128f) - @simd F32x4Sqrt => visit_f32x4_sqrt (unary v128f) - @simd F32x4Add => visit_f32x4_add (binary v128f) - @simd F32x4Sub => visit_f32x4_sub (binary v128f) - @simd F32x4Mul => visit_f32x4_mul (binary v128f) - @simd F32x4Div => visit_f32x4_div (binary v128f) - @simd F32x4Min => visit_f32x4_min 
(binary v128f) - @simd F32x4Max => visit_f32x4_max (binary v128f) - @simd F32x4PMin => visit_f32x4_pmin (binary v128f) - @simd F32x4PMax => visit_f32x4_pmax (binary v128f) - @simd F64x2Ceil => visit_f64x2_ceil (unary v128f) - @simd F64x2Floor => visit_f64x2_floor (unary v128f) - @simd F64x2Trunc => visit_f64x2_trunc (unary v128f) - @simd F64x2Nearest => visit_f64x2_nearest (unary v128f) - @simd F64x2Abs => visit_f64x2_abs (unary v128f) - @simd F64x2Neg => visit_f64x2_neg (unary v128f) - @simd F64x2Sqrt => visit_f64x2_sqrt (unary v128f) - @simd F64x2Add => visit_f64x2_add (binary v128f) - @simd F64x2Sub => visit_f64x2_sub (binary v128f) - @simd F64x2Mul => visit_f64x2_mul (binary v128f) - @simd F64x2Div => visit_f64x2_div (binary v128f) - @simd F64x2Min => visit_f64x2_min (binary v128f) - @simd F64x2Max => visit_f64x2_max (binary v128f) - @simd F64x2PMin => visit_f64x2_pmin (binary v128f) - @simd F64x2PMax => visit_f64x2_pmax (binary v128f) - @simd I32x4TruncSatF32x4S => visit_i32x4_trunc_sat_f32x4_s (unary v128f) - @simd I32x4TruncSatF32x4U => visit_i32x4_trunc_sat_f32x4_u (unary v128f) - @simd F32x4ConvertI32x4S => visit_f32x4_convert_i32x4_s (unary v128f) - @simd F32x4ConvertI32x4U => visit_f32x4_convert_i32x4_u (unary v128f) - @simd I32x4TruncSatF64x2SZero => visit_i32x4_trunc_sat_f64x2_s_zero (unary v128f) - @simd I32x4TruncSatF64x2UZero => visit_i32x4_trunc_sat_f64x2_u_zero (unary v128f) - @simd F64x2ConvertLowI32x4S => visit_f64x2_convert_low_i32x4_s (unary v128f) - @simd F64x2ConvertLowI32x4U => visit_f64x2_convert_low_i32x4_u (unary v128f) - @simd F32x4DemoteF64x2Zero => visit_f32x4_demote_f64x2_zero (unary v128f) - @simd F64x2PromoteLowF32x4 => visit_f64x2_promote_low_f32x4 (unary v128f) - - // Relaxed SIMD operators - // https://github.com/WebAssembly/relaxed-simd - @relaxed_simd I8x16RelaxedSwizzle => visit_i8x16_relaxed_swizzle (binary v128) - @relaxed_simd I32x4RelaxedTruncF32x4S => visit_i32x4_relaxed_trunc_f32x4_s (unary v128) - @relaxed_simd 
I32x4RelaxedTruncF32x4U => visit_i32x4_relaxed_trunc_f32x4_u (unary v128) - @relaxed_simd I32x4RelaxedTruncF64x2SZero => visit_i32x4_relaxed_trunc_f64x2_s_zero (unary v128) - @relaxed_simd I32x4RelaxedTruncF64x2UZero => visit_i32x4_relaxed_trunc_f64x2_u_zero (unary v128) - @relaxed_simd F32x4RelaxedMadd => visit_f32x4_relaxed_madd (ternary v128) - @relaxed_simd F32x4RelaxedNmadd => visit_f32x4_relaxed_nmadd (ternary v128) - @relaxed_simd F64x2RelaxedMadd => visit_f64x2_relaxed_madd (ternary v128) - @relaxed_simd F64x2RelaxedNmadd => visit_f64x2_relaxed_nmadd (ternary v128) - @relaxed_simd I8x16RelaxedLaneselect => visit_i8x16_relaxed_laneselect (ternary v128) - @relaxed_simd I16x8RelaxedLaneselect => visit_i16x8_relaxed_laneselect (ternary v128) - @relaxed_simd I32x4RelaxedLaneselect => visit_i32x4_relaxed_laneselect (ternary v128) - @relaxed_simd I64x2RelaxedLaneselect => visit_i64x2_relaxed_laneselect (ternary v128) - @relaxed_simd F32x4RelaxedMin => visit_f32x4_relaxed_min (binary v128) - @relaxed_simd F32x4RelaxedMax => visit_f32x4_relaxed_max (binary v128) - @relaxed_simd F64x2RelaxedMin => visit_f64x2_relaxed_min (binary v128) - @relaxed_simd F64x2RelaxedMax => visit_f64x2_relaxed_max (binary v128) - @relaxed_simd I16x8RelaxedQ15mulrS => visit_i16x8_relaxed_q15mulr_s (binary v128) - @relaxed_simd I16x8RelaxedDotI8x16I7x16S => visit_i16x8_relaxed_dot_i8x16_i7x16_s (binary v128) - @relaxed_simd I32x4RelaxedDotI8x16I7x16AddS => visit_i32x4_relaxed_dot_i8x16_i7x16_add_s (ternary v128) - } - }; -} diff --git a/crates/wasmparser/src/lib.rs b/crates/wasmparser/src/lib.rs index 50eea01a2f..d0f4e371bf 100644 --- a/crates/wasmparser/src/lib.rs +++ b/crates/wasmparser/src/lib.rs @@ -26,6 +26,7 @@ //! To get started, create a [`Parser`] using [`Parser::new`] and then follow //! the examples documented for [`Parser::parse`] or [`Parser::parse_all`]. 
+#![recursion_limit = "1024"] #![deny(missing_docs)] #![no_std] #![cfg_attr(docsrs, feature(doc_auto_cfg))] @@ -204,7 +205,8 @@ mod prelude { /// } /// ``` #[macro_export] -macro_rules! for_each_operator { +#[doc(hidden)] +macro_rules! _for_each_operator { ($mac:ident) => { $mac! { @mvp Unreachable => visit_unreachable (arity 0 -> 0) @@ -552,6 +554,270 @@ macro_rules! for_each_operator { @threads I64AtomicRmw16CmpxchgU { memarg: $crate::MemArg } => visit_i64_atomic_rmw16_cmpxchg_u (atomic cmpxchg i64) @threads I64AtomicRmw32CmpxchgU { memarg: $crate::MemArg } => visit_i64_atomic_rmw32_cmpxchg_u (atomic cmpxchg i64) + // 0xFD operators + // 128-bit SIMD + // - https://github.com/webassembly/simd + // - https://webassembly.github.io/simd/core/binary/instructions.html + @simd V128Load { memarg: $crate::MemArg } => visit_v128_load (load v128) + @simd V128Load8x8S { memarg: $crate::MemArg } => visit_v128_load8x8_s (load v128) + @simd V128Load8x8U { memarg: $crate::MemArg } => visit_v128_load8x8_u (load v128) + @simd V128Load16x4S { memarg: $crate::MemArg } => visit_v128_load16x4_s (load v128) + @simd V128Load16x4U { memarg: $crate::MemArg } => visit_v128_load16x4_u (load v128) + @simd V128Load32x2S { memarg: $crate::MemArg } => visit_v128_load32x2_s (load v128) + @simd V128Load32x2U { memarg: $crate::MemArg } => visit_v128_load32x2_u (load v128) + @simd V128Load8Splat { memarg: $crate::MemArg } => visit_v128_load8_splat (load v128) + @simd V128Load16Splat { memarg: $crate::MemArg } => visit_v128_load16_splat (load v128) + @simd V128Load32Splat { memarg: $crate::MemArg } => visit_v128_load32_splat (load v128) + @simd V128Load64Splat { memarg: $crate::MemArg } => visit_v128_load64_splat (load v128) + @simd V128Load32Zero { memarg: $crate::MemArg } => visit_v128_load32_zero (load v128) + @simd V128Load64Zero { memarg: $crate::MemArg } => visit_v128_load64_zero (load v128) + @simd V128Store { memarg: $crate::MemArg } => visit_v128_store (store v128) + @simd V128Load8Lane { 
memarg: $crate::MemArg, lane: u8 } => visit_v128_load8_lane (load lane 16) + @simd V128Load16Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load16_lane (load lane 8) + @simd V128Load32Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load32_lane (load lane 4) + @simd V128Load64Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load64_lane (load lane 2) + @simd V128Store8Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_store8_lane (store lane 16) + @simd V128Store16Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_store16_lane (store lane 8) + @simd V128Store32Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_store32_lane (store lane 4) + @simd V128Store64Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_store64_lane (store lane 2) + @simd V128Const { value: $crate::V128 } => visit_v128_const (push v128) + @simd I8x16Shuffle { lanes: [u8; 16] } => visit_i8x16_shuffle (arity 2 -> 1) + @simd I8x16ExtractLaneS { lane: u8 } => visit_i8x16_extract_lane_s (extract i32 16) + @simd I8x16ExtractLaneU { lane: u8 } => visit_i8x16_extract_lane_u (extract i32 16) + @simd I8x16ReplaceLane { lane: u8 } => visit_i8x16_replace_lane (replace i32 16) + @simd I16x8ExtractLaneS { lane: u8 } => visit_i16x8_extract_lane_s (extract i32 8) + @simd I16x8ExtractLaneU { lane: u8 } => visit_i16x8_extract_lane_u (extract i32 8) + @simd I16x8ReplaceLane { lane: u8 } => visit_i16x8_replace_lane (replace i32 8) + @simd I32x4ExtractLane { lane: u8 } => visit_i32x4_extract_lane (extract i32 4) + @simd I32x4ReplaceLane { lane: u8 } => visit_i32x4_replace_lane (replace i32 4) + @simd I64x2ExtractLane { lane: u8 } => visit_i64x2_extract_lane (extract i64 2) + @simd I64x2ReplaceLane { lane: u8 } => visit_i64x2_replace_lane (replace i64 2) + @simd F32x4ExtractLane { lane: u8 } => visit_f32x4_extract_lane (extract f32 4) + @simd F32x4ReplaceLane { lane: u8 } => visit_f32x4_replace_lane (replace f32 4) + @simd F64x2ExtractLane { lane: u8 } => 
visit_f64x2_extract_lane (extract f64 2) + @simd F64x2ReplaceLane { lane: u8 } => visit_f64x2_replace_lane (replace f64 2) + @simd I8x16Swizzle => visit_i8x16_swizzle (binary v128) + @simd I8x16Splat => visit_i8x16_splat (splat i32) + @simd I16x8Splat => visit_i16x8_splat (splat i32) + @simd I32x4Splat => visit_i32x4_splat (splat i32) + @simd I64x2Splat => visit_i64x2_splat (splat i64) + @simd F32x4Splat => visit_f32x4_splat (splat f32) + @simd F64x2Splat => visit_f64x2_splat (splat f64) + @simd I8x16Eq => visit_i8x16_eq (binary v128) + @simd I8x16Ne => visit_i8x16_ne (binary v128) + @simd I8x16LtS => visit_i8x16_lt_s (binary v128) + @simd I8x16LtU => visit_i8x16_lt_u (binary v128) + @simd I8x16GtS => visit_i8x16_gt_s (binary v128) + @simd I8x16GtU => visit_i8x16_gt_u (binary v128) + @simd I8x16LeS => visit_i8x16_le_s (binary v128) + @simd I8x16LeU => visit_i8x16_le_u (binary v128) + @simd I8x16GeS => visit_i8x16_ge_s (binary v128) + @simd I8x16GeU => visit_i8x16_ge_u (binary v128) + @simd I16x8Eq => visit_i16x8_eq (binary v128) + @simd I16x8Ne => visit_i16x8_ne (binary v128) + @simd I16x8LtS => visit_i16x8_lt_s (binary v128) + @simd I16x8LtU => visit_i16x8_lt_u (binary v128) + @simd I16x8GtS => visit_i16x8_gt_s (binary v128) + @simd I16x8GtU => visit_i16x8_gt_u (binary v128) + @simd I16x8LeS => visit_i16x8_le_s (binary v128) + @simd I16x8LeU => visit_i16x8_le_u (binary v128) + @simd I16x8GeS => visit_i16x8_ge_s (binary v128) + @simd I16x8GeU => visit_i16x8_ge_u (binary v128) + @simd I32x4Eq => visit_i32x4_eq (binary v128) + @simd I32x4Ne => visit_i32x4_ne (binary v128) + @simd I32x4LtS => visit_i32x4_lt_s (binary v128) + @simd I32x4LtU => visit_i32x4_lt_u (binary v128) + @simd I32x4GtS => visit_i32x4_gt_s (binary v128) + @simd I32x4GtU => visit_i32x4_gt_u (binary v128) + @simd I32x4LeS => visit_i32x4_le_s (binary v128) + @simd I32x4LeU => visit_i32x4_le_u (binary v128) + @simd I32x4GeS => visit_i32x4_ge_s (binary v128) + @simd I32x4GeU => visit_i32x4_ge_u (binary 
v128) + @simd I64x2Eq => visit_i64x2_eq (binary v128) + @simd I64x2Ne => visit_i64x2_ne (binary v128) + @simd I64x2LtS => visit_i64x2_lt_s (binary v128) + @simd I64x2GtS => visit_i64x2_gt_s (binary v128) + @simd I64x2LeS => visit_i64x2_le_s (binary v128) + @simd I64x2GeS => visit_i64x2_ge_s (binary v128) + @simd F32x4Eq => visit_f32x4_eq (binary v128f) + @simd F32x4Ne => visit_f32x4_ne (binary v128f) + @simd F32x4Lt => visit_f32x4_lt (binary v128f) + @simd F32x4Gt => visit_f32x4_gt (binary v128f) + @simd F32x4Le => visit_f32x4_le (binary v128f) + @simd F32x4Ge => visit_f32x4_ge (binary v128f) + @simd F64x2Eq => visit_f64x2_eq (binary v128f) + @simd F64x2Ne => visit_f64x2_ne (binary v128f) + @simd F64x2Lt => visit_f64x2_lt (binary v128f) + @simd F64x2Gt => visit_f64x2_gt (binary v128f) + @simd F64x2Le => visit_f64x2_le (binary v128f) + @simd F64x2Ge => visit_f64x2_ge (binary v128f) + @simd V128Not => visit_v128_not (unary v128) + @simd V128And => visit_v128_and (binary v128) + @simd V128AndNot => visit_v128_andnot (binary v128) + @simd V128Or => visit_v128_or (binary v128) + @simd V128Xor => visit_v128_xor (binary v128) + @simd V128Bitselect => visit_v128_bitselect (ternary v128) + @simd V128AnyTrue => visit_v128_any_true (test v128) + @simd I8x16Abs => visit_i8x16_abs (unary v128) + @simd I8x16Neg => visit_i8x16_neg (unary v128) + @simd I8x16Popcnt => visit_i8x16_popcnt (unary v128) + @simd I8x16AllTrue => visit_i8x16_all_true (test v128) + @simd I8x16Bitmask => visit_i8x16_bitmask (test v128) + @simd I8x16NarrowI16x8S => visit_i8x16_narrow_i16x8_s (binary v128) + @simd I8x16NarrowI16x8U => visit_i8x16_narrow_i16x8_u (binary v128) + @simd I8x16Shl => visit_i8x16_shl (shift v128) + @simd I8x16ShrS => visit_i8x16_shr_s (shift v128) + @simd I8x16ShrU => visit_i8x16_shr_u (shift v128) + @simd I8x16Add => visit_i8x16_add (binary v128) + @simd I8x16AddSatS => visit_i8x16_add_sat_s (binary v128) + @simd I8x16AddSatU => visit_i8x16_add_sat_u (binary v128) + @simd I8x16Sub 
=> visit_i8x16_sub (binary v128) + @simd I8x16SubSatS => visit_i8x16_sub_sat_s (binary v128) + @simd I8x16SubSatU => visit_i8x16_sub_sat_u (binary v128) + @simd I8x16MinS => visit_i8x16_min_s (binary v128) + @simd I8x16MinU => visit_i8x16_min_u (binary v128) + @simd I8x16MaxS => visit_i8x16_max_s (binary v128) + @simd I8x16MaxU => visit_i8x16_max_u (binary v128) + @simd I8x16AvgrU => visit_i8x16_avgr_u (binary v128) + @simd I16x8ExtAddPairwiseI8x16S => visit_i16x8_extadd_pairwise_i8x16_s (unary v128) + @simd I16x8ExtAddPairwiseI8x16U => visit_i16x8_extadd_pairwise_i8x16_u (unary v128) + @simd I16x8Abs => visit_i16x8_abs (unary v128) + @simd I16x8Neg => visit_i16x8_neg (unary v128) + @simd I16x8Q15MulrSatS => visit_i16x8_q15mulr_sat_s (binary v128) + @simd I16x8AllTrue => visit_i16x8_all_true (test v128) + @simd I16x8Bitmask => visit_i16x8_bitmask (test v128) + @simd I16x8NarrowI32x4S => visit_i16x8_narrow_i32x4_s (binary v128) + @simd I16x8NarrowI32x4U => visit_i16x8_narrow_i32x4_u (binary v128) + @simd I16x8ExtendLowI8x16S => visit_i16x8_extend_low_i8x16_s (unary v128) + @simd I16x8ExtendHighI8x16S => visit_i16x8_extend_high_i8x16_s (unary v128) + @simd I16x8ExtendLowI8x16U => visit_i16x8_extend_low_i8x16_u (unary v128) + @simd I16x8ExtendHighI8x16U => visit_i16x8_extend_high_i8x16_u (unary v128) + @simd I16x8Shl => visit_i16x8_shl (shift v128) + @simd I16x8ShrS => visit_i16x8_shr_s (shift v128) + @simd I16x8ShrU => visit_i16x8_shr_u (shift v128) + @simd I16x8Add => visit_i16x8_add (binary v128) + @simd I16x8AddSatS => visit_i16x8_add_sat_s (binary v128) + @simd I16x8AddSatU => visit_i16x8_add_sat_u (binary v128) + @simd I16x8Sub => visit_i16x8_sub (binary v128) + @simd I16x8SubSatS => visit_i16x8_sub_sat_s (binary v128) + @simd I16x8SubSatU => visit_i16x8_sub_sat_u (binary v128) + @simd I16x8Mul => visit_i16x8_mul (binary v128) + @simd I16x8MinS => visit_i16x8_min_s (binary v128) + @simd I16x8MinU => visit_i16x8_min_u (binary v128) + @simd I16x8MaxS => 
visit_i16x8_max_s (binary v128) + @simd I16x8MaxU => visit_i16x8_max_u (binary v128) + @simd I16x8AvgrU => visit_i16x8_avgr_u (binary v128) + @simd I16x8ExtMulLowI8x16S => visit_i16x8_extmul_low_i8x16_s (binary v128) + @simd I16x8ExtMulHighI8x16S => visit_i16x8_extmul_high_i8x16_s (binary v128) + @simd I16x8ExtMulLowI8x16U => visit_i16x8_extmul_low_i8x16_u (binary v128) + @simd I16x8ExtMulHighI8x16U => visit_i16x8_extmul_high_i8x16_u (binary v128) + @simd I32x4ExtAddPairwiseI16x8S => visit_i32x4_extadd_pairwise_i16x8_s (unary v128) + @simd I32x4ExtAddPairwiseI16x8U => visit_i32x4_extadd_pairwise_i16x8_u (unary v128) + @simd I32x4Abs => visit_i32x4_abs (unary v128) + @simd I32x4Neg => visit_i32x4_neg (unary v128) + @simd I32x4AllTrue => visit_i32x4_all_true (test v128) + @simd I32x4Bitmask => visit_i32x4_bitmask (test v128) + @simd I32x4ExtendLowI16x8S => visit_i32x4_extend_low_i16x8_s (unary v128) + @simd I32x4ExtendHighI16x8S => visit_i32x4_extend_high_i16x8_s (unary v128) + @simd I32x4ExtendLowI16x8U => visit_i32x4_extend_low_i16x8_u (unary v128) + @simd I32x4ExtendHighI16x8U => visit_i32x4_extend_high_i16x8_u (unary v128) + @simd I32x4Shl => visit_i32x4_shl (shift v128) + @simd I32x4ShrS => visit_i32x4_shr_s (shift v128) + @simd I32x4ShrU => visit_i32x4_shr_u (shift v128) + @simd I32x4Add => visit_i32x4_add (binary v128) + @simd I32x4Sub => visit_i32x4_sub (binary v128) + @simd I32x4Mul => visit_i32x4_mul (binary v128) + @simd I32x4MinS => visit_i32x4_min_s (binary v128) + @simd I32x4MinU => visit_i32x4_min_u (binary v128) + @simd I32x4MaxS => visit_i32x4_max_s (binary v128) + @simd I32x4MaxU => visit_i32x4_max_u (binary v128) + @simd I32x4DotI16x8S => visit_i32x4_dot_i16x8_s (binary v128) + @simd I32x4ExtMulLowI16x8S => visit_i32x4_extmul_low_i16x8_s (binary v128) + @simd I32x4ExtMulHighI16x8S => visit_i32x4_extmul_high_i16x8_s (binary v128) + @simd I32x4ExtMulLowI16x8U => visit_i32x4_extmul_low_i16x8_u (binary v128) + @simd I32x4ExtMulHighI16x8U => 
visit_i32x4_extmul_high_i16x8_u (binary v128) + @simd I64x2Abs => visit_i64x2_abs (unary v128) + @simd I64x2Neg => visit_i64x2_neg (unary v128) + @simd I64x2AllTrue => visit_i64x2_all_true (test v128) + @simd I64x2Bitmask => visit_i64x2_bitmask (test v128) + @simd I64x2ExtendLowI32x4S => visit_i64x2_extend_low_i32x4_s (unary v128) + @simd I64x2ExtendHighI32x4S => visit_i64x2_extend_high_i32x4_s (unary v128) + @simd I64x2ExtendLowI32x4U => visit_i64x2_extend_low_i32x4_u (unary v128) + @simd I64x2ExtendHighI32x4U => visit_i64x2_extend_high_i32x4_u (unary v128) + @simd I64x2Shl => visit_i64x2_shl (shift v128) + @simd I64x2ShrS => visit_i64x2_shr_s (shift v128) + @simd I64x2ShrU => visit_i64x2_shr_u (shift v128) + @simd I64x2Add => visit_i64x2_add (binary v128) + @simd I64x2Sub => visit_i64x2_sub (binary v128) + @simd I64x2Mul => visit_i64x2_mul (binary v128) + @simd I64x2ExtMulLowI32x4S => visit_i64x2_extmul_low_i32x4_s (binary v128) + @simd I64x2ExtMulHighI32x4S => visit_i64x2_extmul_high_i32x4_s (binary v128) + @simd I64x2ExtMulLowI32x4U => visit_i64x2_extmul_low_i32x4_u (binary v128) + @simd I64x2ExtMulHighI32x4U => visit_i64x2_extmul_high_i32x4_u (binary v128) + @simd F32x4Ceil => visit_f32x4_ceil (unary v128f) + @simd F32x4Floor => visit_f32x4_floor (unary v128f) + @simd F32x4Trunc => visit_f32x4_trunc (unary v128f) + @simd F32x4Nearest => visit_f32x4_nearest (unary v128f) + @simd F32x4Abs => visit_f32x4_abs (unary v128f) + @simd F32x4Neg => visit_f32x4_neg (unary v128f) + @simd F32x4Sqrt => visit_f32x4_sqrt (unary v128f) + @simd F32x4Add => visit_f32x4_add (binary v128f) + @simd F32x4Sub => visit_f32x4_sub (binary v128f) + @simd F32x4Mul => visit_f32x4_mul (binary v128f) + @simd F32x4Div => visit_f32x4_div (binary v128f) + @simd F32x4Min => visit_f32x4_min (binary v128f) + @simd F32x4Max => visit_f32x4_max (binary v128f) + @simd F32x4PMin => visit_f32x4_pmin (binary v128f) + @simd F32x4PMax => visit_f32x4_pmax (binary v128f) + @simd F64x2Ceil => visit_f64x2_ceil 
(unary v128f) + @simd F64x2Floor => visit_f64x2_floor (unary v128f) + @simd F64x2Trunc => visit_f64x2_trunc (unary v128f) + @simd F64x2Nearest => visit_f64x2_nearest (unary v128f) + @simd F64x2Abs => visit_f64x2_abs (unary v128f) + @simd F64x2Neg => visit_f64x2_neg (unary v128f) + @simd F64x2Sqrt => visit_f64x2_sqrt (unary v128f) + @simd F64x2Add => visit_f64x2_add (binary v128f) + @simd F64x2Sub => visit_f64x2_sub (binary v128f) + @simd F64x2Mul => visit_f64x2_mul (binary v128f) + @simd F64x2Div => visit_f64x2_div (binary v128f) + @simd F64x2Min => visit_f64x2_min (binary v128f) + @simd F64x2Max => visit_f64x2_max (binary v128f) + @simd F64x2PMin => visit_f64x2_pmin (binary v128f) + @simd F64x2PMax => visit_f64x2_pmax (binary v128f) + @simd I32x4TruncSatF32x4S => visit_i32x4_trunc_sat_f32x4_s (unary v128f) + @simd I32x4TruncSatF32x4U => visit_i32x4_trunc_sat_f32x4_u (unary v128f) + @simd F32x4ConvertI32x4S => visit_f32x4_convert_i32x4_s (unary v128f) + @simd F32x4ConvertI32x4U => visit_f32x4_convert_i32x4_u (unary v128f) + @simd I32x4TruncSatF64x2SZero => visit_i32x4_trunc_sat_f64x2_s_zero (unary v128f) + @simd I32x4TruncSatF64x2UZero => visit_i32x4_trunc_sat_f64x2_u_zero (unary v128f) + @simd F64x2ConvertLowI32x4S => visit_f64x2_convert_low_i32x4_s (unary v128f) + @simd F64x2ConvertLowI32x4U => visit_f64x2_convert_low_i32x4_u (unary v128f) + @simd F32x4DemoteF64x2Zero => visit_f32x4_demote_f64x2_zero (unary v128f) + @simd F64x2PromoteLowF32x4 => visit_f64x2_promote_low_f32x4 (unary v128f) + + // Relaxed SIMD operators + // https://github.com/WebAssembly/relaxed-simd + @relaxed_simd I8x16RelaxedSwizzle => visit_i8x16_relaxed_swizzle (binary v128) + @relaxed_simd I32x4RelaxedTruncF32x4S => visit_i32x4_relaxed_trunc_f32x4_s (unary v128) + @relaxed_simd I32x4RelaxedTruncF32x4U => visit_i32x4_relaxed_trunc_f32x4_u (unary v128) + @relaxed_simd I32x4RelaxedTruncF64x2SZero => visit_i32x4_relaxed_trunc_f64x2_s_zero (unary v128) + @relaxed_simd I32x4RelaxedTruncF64x2UZero 
=> visit_i32x4_relaxed_trunc_f64x2_u_zero (unary v128) + @relaxed_simd F32x4RelaxedMadd => visit_f32x4_relaxed_madd (ternary v128) + @relaxed_simd F32x4RelaxedNmadd => visit_f32x4_relaxed_nmadd (ternary v128) + @relaxed_simd F64x2RelaxedMadd => visit_f64x2_relaxed_madd (ternary v128) + @relaxed_simd F64x2RelaxedNmadd => visit_f64x2_relaxed_nmadd (ternary v128) + @relaxed_simd I8x16RelaxedLaneselect => visit_i8x16_relaxed_laneselect (ternary v128) + @relaxed_simd I16x8RelaxedLaneselect => visit_i16x8_relaxed_laneselect (ternary v128) + @relaxed_simd I32x4RelaxedLaneselect => visit_i32x4_relaxed_laneselect (ternary v128) + @relaxed_simd I64x2RelaxedLaneselect => visit_i64x2_relaxed_laneselect (ternary v128) + @relaxed_simd F32x4RelaxedMin => visit_f32x4_relaxed_min (binary v128) + @relaxed_simd F32x4RelaxedMax => visit_f32x4_relaxed_max (binary v128) + @relaxed_simd F64x2RelaxedMin => visit_f64x2_relaxed_min (binary v128) + @relaxed_simd F64x2RelaxedMax => visit_f64x2_relaxed_max (binary v128) + @relaxed_simd I16x8RelaxedQ15mulrS => visit_i16x8_relaxed_q15mulr_s (binary v128) + @relaxed_simd I16x8RelaxedDotI8x16I7x16S => visit_i16x8_relaxed_dot_i8x16_i7x16_s (binary v128) + @relaxed_simd I32x4RelaxedDotI8x16I7x16AddS => visit_i32x4_relaxed_dot_i8x16_i7x16_add_s (ternary v128) + // Also 0xFE prefixed operators // shared-everything threads // https://github.com/WebAssembly/shared-everything-threads @@ -615,8 +881,93 @@ macro_rules! for_each_operator { }; } +// #[cfg(feature = "simd")] +macro_rules! define_for_each_non_simd_operator { + (@ $($t:tt)*) => {define_for_each_non_simd_operator!(filter [] @ $($t)*);}; + + (filter [$($t:tt)*]) => { + macro_rules! for_each_operator { + ($m:ident) => { + $m! { $($t)* } + } + } + }; + + ( + filter [$($t:tt)*] + @simd $op:ident $({ $($arg:ident: $argty:ty),* })? 
=> $visit:ident ($($ann:tt)*) $($rest:tt)* + ) => { + define_for_each_non_simd_operator!(filter [$($t)*] $($rest)*); + }; + ( + filter [$($t:tt)*] + @relaxed_simd $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) $($rest:tt)* + ) => { + define_for_each_non_simd_operator!(filter [$($t)*] $($rest)*); + }; + ( + filter [$($t:tt)*] + @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) $($rest:tt)* + ) => { + define_for_each_non_simd_operator!( + filter [ + $($t)* + @$proposal $op $({ $($arg: $argty),* })? => $visit ($($ann)*) + ] + $($rest)* + ); + }; +} +_for_each_operator!(define_for_each_non_simd_operator); +use for_each_operator; + +#[cfg(feature = "simd")] +macro_rules! define_for_each_simd_operator { + (@ $($t:tt)*) => {define_for_each_simd_operator!(filter [] @ $($t)*);}; + + (filter [$($t:tt)*]) => { + macro_rules! for_each_simd_operator { + ($m:ident) => { + $m! { $($t)* } + } + } + }; + + ( + filter [$($t:tt)*] + @simd $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) $($rest:tt)* + ) => { + define_for_each_simd_operator!( + filter [ + $($t)* + @simd $op $({ $($arg: $argty),* })? => $visit ($($ann)*) + ] + $($rest)* + ); + }; + ( + filter [$($t:tt)*] + @relaxed_simd $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) $($rest:tt)* + ) => { + define_for_each_simd_operator!( + filter [ + $($t)* + @relaxed_simd $op $({ $($arg: $argty),* })? => $visit ($($ann)*) + ] + $($rest)* + ); + }; + ( + filter [$($t:tt)*] + @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) $($rest:tt)* + ) => { + define_for_each_simd_operator!(filter [$($t)*] $($rest)*); + }; +} +#[cfg(feature = "simd")] +_for_each_operator!(define_for_each_simd_operator); #[cfg(feature = "simd")] -mod for_each_simd_op; +use for_each_simd_operator; macro_rules!
format_err { ($offset:expr, $($arg:tt)*) => { From 887a8cfcea9fe8777c3ecfc2f5801fbbc52e0ea7 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Fri, 22 Nov 2024 00:38:33 +0100 Subject: [PATCH 52/83] remove comment out line --- crates/wasmparser/src/lib.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/wasmparser/src/lib.rs b/crates/wasmparser/src/lib.rs index d0f4e371bf..a14a319780 100644 --- a/crates/wasmparser/src/lib.rs +++ b/crates/wasmparser/src/lib.rs @@ -881,7 +881,6 @@ macro_rules! _for_each_operator { }; } -// #[cfg(feature = "simd")] macro_rules! define_for_each_non_simd_operator { (@ $($t:tt)*) => {define_for_each_non_simd_operator!(filter [] @ $($t)*);}; From cbbc575b41c5ef2c4e874d4b4efce29eebe77f70 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Fri, 22 Nov 2024 00:45:44 +0100 Subject: [PATCH 53/83] put docs on the exported macros --- crates/wasmparser/src/lib.rs | 319 +++++++++++++++++++---------------- 1 file changed, 171 insertions(+), 148 deletions(-) diff --git a/crates/wasmparser/src/lib.rs b/crates/wasmparser/src/lib.rs index a14a319780..4288747b01 100644 --- a/crates/wasmparser/src/lib.rs +++ b/crates/wasmparser/src/lib.rs @@ -56,154 +56,6 @@ mod prelude { pub use crate::collections::{IndexMap, Map, Set}; } -/// A helper macro to conveniently iterate over all opcodes recognized by this -/// crate. This can be used to work with either the [`Operator`] enumeration or -/// the [`VisitOperator`] trait if your use case uniformly handles all operators -/// the same way. -/// -/// Note: SIMD operators are handled by the [`for_each_simd_operator`] macro. -/// -/// It is also possible to specialize handling of operators depending on the -/// Wasm proposal from which they are originating. -/// -/// This is an "iterator macro" where this macro is invoked with the name of -/// another macro, and then that macro is invoked with the list of all -/// operators. 
An example invocation of this looks like: -/// -/// The list of specializable Wasm proposals is as follows: -/// -/// - `@mvp`: Denoting a Wasm operator from the initial Wasm MVP version. -/// - `@exceptions`: [Wasm `exception-handling` proposal] -/// - `@tail_call`: [Wasm `tail-calls` proposal] -/// - `@reference_types`: [Wasm `reference-types` proposal] -/// - `@sign_extension`: [Wasm `sign-extension-ops` proposal] -/// - `@saturating_float_to_int`: [Wasm `non_trapping_float-to-int-conversions` proposal] -/// - `@bulk_memory `:[Wasm `bulk-memory` proposal] -/// - `@threads`: [Wasm `threads` proposal] -/// - `@gc`: [Wasm `gc` proposal] -/// - `@stack_switching`: [Wasm `stack-switching` proposal] -/// - `@wide_arithmetic`: [Wasm `wide-arithmetic` proposal] -/// -/// [Wasm `exception-handling` proposal]: -/// https://github.com/WebAssembly/exception-handling -/// -/// [Wasm `tail-calls` proposal]: -/// https://github.com/WebAssembly/tail-call -/// -/// [Wasm `reference-types` proposal]: -/// https://github.com/WebAssembly/reference-types -/// -/// [Wasm `sign-extension-ops` proposal]: -/// https://github.com/WebAssembly/sign-extension-ops -/// -/// [Wasm `non_trapping_float-to-int-conversions` proposal]: -/// https://github.com/WebAssembly/nontrapping-float-to-int-conversions -/// -/// [Wasm `bulk-memory` proposal]: -/// https://github.com/WebAssembly/bulk-memory-operations -/// -/// [Wasm `threads` proposal]: -/// https://github.com/webassembly/threads -/// -/// [Wasm `gc` proposal]: -/// https://github.com/WebAssembly/gc -/// -/// [Wasm `stack-switching` proposal]: -/// https://github.com/WebAssembly/stack-switching -/// -/// [Wasm `wide-arithmetic` proposal]: -/// https://github.com/WebAssembly/wide-arithmetic -/// -/// ``` -/// macro_rules! define_visit_operator { -/// // The outer layer of repetition represents how all operators are -/// // provided to the macro at the same time. 
-/// // -/// // The `$proposal` identifier indicates the Wasm proposals from which -/// // the Wasm operator is originating. -/// // For example to specialize the macro match arm for Wasm SIMD proposal -/// // operators you could write `@simd` instead of `@$proposal:ident` to -/// // only catch those operators. -/// // -/// // The `$op` name is bound to the `Operator` variant name. The -/// // payload of the operator is optionally specified (the `$(...)?` -/// // clause) since not all instructions have payloads. Within the payload -/// // each argument is named and has its type specified. -/// // -/// // The `$visit` name is bound to the corresponding name in the -/// // `VisitOperator` trait that this corresponds to. -/// // -/// // The `$ann` annotations give information about the operator's type (e.g. binary i32 or arity 2 -> 1). -/// ($( @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { -/// $( -/// fn $visit(&mut self $($(,$arg: $argty)*)?) { -/// // do nothing for this example -/// } -/// )* -/// } -/// } -/// -/// pub struct VisitAndDoNothing; -/// -/// impl<'a> wasmparser::VisitOperator<'a> for VisitAndDoNothing { -/// type Output = (); -/// -/// wasmparser::for_each_operator!(define_visit_operator); -/// } -/// ``` -/// -/// If you only wanted to visit the initial base set of wasm instructions, for -/// example, you could do: -/// -/// ``` -/// macro_rules! visit_only_mvp { -/// // delegate the macro invocation to sub-invocations of this macro to -/// // deal with each instruction on a case-by-case basis. -/// ($( @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { -/// $( -/// visit_only_mvp!(visit_one @$proposal $op $({ $($arg: $argty),* })? => $visit); -/// )* -/// }; -/// -/// // MVP instructions are defined manually, so do nothing. -/// (visit_one @mvp $($rest:tt)*) => {}; -/// -/// // Non-MVP instructions all return `false` here. 
The exact type depends -/// // on `type Output` in the trait implementation below. You could change -/// // it to `Result<()>` for example and return an error here too. -/// (visit_one @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident) => { -/// fn $visit(&mut self $($(,$arg: $argty)*)?) -> bool { -/// false -/// } -/// } -/// } -/// # // to get this example to compile another macro is used here to define -/// # // visit methods for all mvp oeprators. -/// # macro_rules! visit_mvp { -/// # ($( @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { -/// # $( -/// # visit_mvp!(visit_one @$proposal $op $({ $($arg: $argty),* })? => $visit); -/// # )* -/// # }; -/// # (visit_one @mvp $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident) => { -/// # fn $visit(&mut self $($(,$arg: $argty)*)?) -> bool { -/// # true -/// # } -/// # }; -/// # (visit_one @$proposal:ident $($rest:tt)*) => {}; -/// # } -/// -/// pub struct VisitOnlyMvp; -/// -/// impl<'a> wasmparser::VisitOperator<'a> for VisitOnlyMvp { -/// type Output = bool; -/// -/// wasmparser::for_each_operator!(visit_only_mvp); -/// # wasmparser::for_each_operator!(visit_mvp); -/// -/// // manually define `visit_*` for all MVP operators here -/// } -/// ``` #[macro_export] #[doc(hidden)] macro_rules! _for_each_operator { @@ -885,6 +737,155 @@ macro_rules! define_for_each_non_simd_operator { (@ $($t:tt)*) => {define_for_each_non_simd_operator!(filter [] @ $($t)*);}; (filter [$($t:tt)*]) => { + /// A helper macro to conveniently iterate over all opcodes recognized by this + /// crate. This can be used to work with either the [`Operator`] enumeration or + /// the [`VisitOperator`] trait if your use case uniformly handles all operators + /// the same way. + /// + /// Note: SIMD operators are handled by the [`for_each_simd_operator`] macro. 
+ /// + /// It is also possible to specialize handling of operators depending on the + /// Wasm proposal from which they are originating. + /// + /// This is an "iterator macro" where this macro is invoked with the name of + /// another macro, and then that macro is invoked with the list of all + /// operators. An example invocation of this looks like: + /// + /// The list of specializable Wasm proposals is as follows: + /// + /// - `@mvp`: Denoting a Wasm operator from the initial Wasm MVP version. + /// - `@exceptions`: [Wasm `exception-handling` proposal] + /// - `@tail_call`: [Wasm `tail-calls` proposal] + /// - `@reference_types`: [Wasm `reference-types` proposal] + /// - `@sign_extension`: [Wasm `sign-extension-ops` proposal] + /// - `@saturating_float_to_int`: [Wasm `non_trapping_float-to-int-conversions` proposal] + /// - `@bulk_memory`: [Wasm `bulk-memory` proposal] + /// - `@threads`: [Wasm `threads` proposal] + /// - `@gc`: [Wasm `gc` proposal] + /// - `@stack_switching`: [Wasm `stack-switching` proposal] + /// - `@wide_arithmetic`: [Wasm `wide-arithmetic` proposal] + /// + /// [Wasm `exception-handling` proposal]: + /// https://github.com/WebAssembly/exception-handling + /// + /// [Wasm `tail-calls` proposal]: + /// https://github.com/WebAssembly/tail-call + /// + /// [Wasm `reference-types` proposal]: + /// https://github.com/WebAssembly/reference-types + /// + /// [Wasm `sign-extension-ops` proposal]: + /// https://github.com/WebAssembly/sign-extension-ops + /// + /// [Wasm `non_trapping_float-to-int-conversions` proposal]: + /// https://github.com/WebAssembly/nontrapping-float-to-int-conversions + /// + /// [Wasm `bulk-memory` proposal]: + /// https://github.com/WebAssembly/bulk-memory-operations + /// + /// [Wasm `threads` proposal]: + /// https://github.com/webassembly/threads + /// + /// [Wasm `gc` proposal]: + /// https://github.com/WebAssembly/gc + /// + /// [Wasm `stack-switching` proposal]: + /// https://github.com/WebAssembly/stack-switching
+ /// + /// [Wasm `wide-arithmetic` proposal]: + /// https://github.com/WebAssembly/wide-arithmetic + /// + /// ``` + /// macro_rules! define_visit_operator { + /// // The outer layer of repetition represents how all operators are + /// // provided to the macro at the same time. + /// // + /// // The `$proposal` identifier indicates the Wasm proposals from which + /// // the Wasm operator is originating. + /// // For example to specialize the macro match arm for Wasm SIMD proposal + /// // operators you could write `@simd` instead of `@$proposal:ident` to + /// // only catch those operators. + /// // + /// // The `$op` name is bound to the `Operator` variant name. The + /// // payload of the operator is optionally specified (the `$(...)?` + /// // clause) since not all instructions have payloads. Within the payload + /// // each argument is named and has its type specified. + /// // + /// // The `$visit` name is bound to the corresponding name in the + /// // `VisitOperator` trait that this corresponds to. + /// // + /// // The `$ann` annotations give information about the operator's type (e.g. binary i32 or arity 2 -> 1). + /// ($( @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { + /// $( + /// fn $visit(&mut self $($(,$arg: $argty)*)?) { + /// // do nothing for this example + /// } + /// )* + /// } + /// } + /// + /// pub struct VisitAndDoNothing; + /// + /// impl<'a> wasmparser::VisitOperator<'a> for VisitAndDoNothing { + /// type Output = (); + /// + /// wasmparser::for_each_operator!(define_visit_operator); + /// } + /// ``` + /// + /// If you only wanted to visit the initial base set of wasm instructions, for + /// example, you could do: + /// + /// ``` + /// macro_rules! visit_only_mvp { + /// // delegate the macro invocation to sub-invocations of this macro to + /// // deal with each instruction on a case-by-case basis. + /// ($( @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? 
=> $visit:ident ($($ann:tt)*))*) => { + /// $( + /// visit_only_mvp!(visit_one @$proposal $op $({ $($arg: $argty),* })? => $visit); + /// )* + /// }; + /// + /// // MVP instructions are defined manually, so do nothing. + /// (visit_one @mvp $($rest:tt)*) => {}; + /// + /// // Non-MVP instructions all return `false` here. The exact type depends + /// // on `type Output` in the trait implementation below. You could change + /// // it to `Result<()>` for example and return an error here too. + /// (visit_one @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident) => { + /// fn $visit(&mut self $($(,$arg: $argty)*)?) -> bool { + /// false + /// } + /// } + /// } + /// # // to get this example to compile another macro is used here to define + /// # // visit methods for all mvp operators. + /// # macro_rules! visit_mvp { + /// # ($( @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { + /// # $( + /// # visit_mvp!(visit_one @$proposal $op $({ $($arg: $argty),* })? => $visit); + /// # )* + /// # }; + /// # (visit_one @mvp $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident) => { + /// # fn $visit(&mut self $($(,$arg: $argty)*)?) -> bool { + /// # true + /// # } + /// # }; + /// # (visit_one @$proposal:ident $($rest:tt)*) => {}; + /// # } + /// + /// pub struct VisitOnlyMvp; + /// + /// impl<'a> wasmparser::VisitOperator<'a> for VisitOnlyMvp { + /// type Output = bool; + /// + /// wasmparser::for_each_operator!(visit_only_mvp); + /// # wasmparser::for_each_operator!(visit_mvp); + /// + /// // manually define `visit_*` for all MVP operators here + /// } + /// ``` + #[macro_export] macro_rules! for_each_operator { ($m:ident) => { $m! { $($t)* } @@ -925,6 +926,28 @@ macro_rules! define_for_each_simd_operator { (@ $($t:tt)*) => {define_for_each_simd_operator!(filter [] @ $($t)*);}; (filter [$($t:tt)*]) => { + /// A helper macro to conveniently iterate over all opcodes recognized by this + /// crate.
This can be used to work with either the [`SimdOperator`] enumeration or + /// the [`VisitSimdOperator`] trait if your use case uniformly handles all operators + /// the same way. + /// + /// The list of specializable Wasm proposals is as follows: + /// + /// - `@simd`: [Wasm `simd` proposal] + /// - `@relaxed_simd`: [Wasm `relaxed-simd` proposal] + /// + /// For more information about the structure and use of this macro please + /// refer to the documentation of the [`for_each_operator`] macro. + /// + /// [Wasm `simd` proposal]: + /// https://github.com/webassembly/simd + /// + /// [Wasm `relaxed-simd` proposal]: + /// https://github.com/WebAssembly/relaxed-simd + /// + /// [`SimdOperator`]: crate::SimdOperator + /// [`VisitSimdOperator`]: crate::VisitSimdOperator + #[macro_export] macro_rules! for_each_simd_operator { ($m:ident) => { $m! { $($t)* } From ba4838696befff7fcab839fae459d3e4955c993d Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Fri, 22 Nov 2024 00:45:56 +0100 Subject: [PATCH 54/83] fix macro imports --- crates/wasmparser/src/arity.rs | 2 -- crates/wasmparser/src/binary_reader.rs | 2 -- crates/wasmparser/src/lib.rs | 3 --- crates/wasmparser/src/readers/core/operators.rs | 2 -- crates/wasmparser/src/validator/core.rs | 2 +- crates/wasmparser/src/validator/operators.rs | 2 +- 6 files changed, 2 insertions(+), 11 deletions(-) diff --git a/crates/wasmparser/src/arity.rs b/crates/wasmparser/src/arity.rs index 618dd8a6b2..ebd7da4512 100644 --- a/crates/wasmparser/src/arity.rs +++ b/crates/wasmparser/src/arity.rs @@ -13,8 +13,6 @@ * limitations under the License. 
*/ -#[cfg(feature = "simd")] -use crate::for_each_simd_operator; #[cfg(feature = "simd")] use crate::SimdOperator; use crate::{ diff --git a/crates/wasmparser/src/binary_reader.rs b/crates/wasmparser/src/binary_reader.rs index 1fc11051d2..3c0120ba87 100644 --- a/crates/wasmparser/src/binary_reader.rs +++ b/crates/wasmparser/src/binary_reader.rs @@ -16,8 +16,6 @@ #[cfg(feature = "simd")] mod simd; -#[cfg(feature = "simd")] -use crate::for_each_simd_operator; use crate::prelude::*; use crate::{limits::*, *}; use core::fmt; diff --git a/crates/wasmparser/src/lib.rs b/crates/wasmparser/src/lib.rs index 4288747b01..af7439380a 100644 --- a/crates/wasmparser/src/lib.rs +++ b/crates/wasmparser/src/lib.rs @@ -919,7 +919,6 @@ macro_rules! define_for_each_non_simd_operator { }; } _for_each_operator!(define_for_each_non_simd_operator); -use for_each_operator; #[cfg(feature = "simd")] macro_rules! define_for_each_simd_operator { @@ -988,8 +987,6 @@ macro_rules! define_for_each_simd_operator { } #[cfg(feature = "simd")] _for_each_operator!(define_for_each_simd_operator); -#[cfg(feature = "simd")] -use for_each_simd_operator; macro_rules! format_err { ($offset:expr, $($arg:tt)*) => { diff --git a/crates/wasmparser/src/readers/core/operators.rs b/crates/wasmparser/src/readers/core/operators.rs index f6f5a68a01..dc769f14ce 100644 --- a/crates/wasmparser/src/readers/core/operators.rs +++ b/crates/wasmparser/src/readers/core/operators.rs @@ -13,8 +13,6 @@ * limitations under the License. 
*/ -#[cfg(feature = "simd")] -use crate::for_each_simd_operator; use crate::limits::{MAX_WASM_CATCHES, MAX_WASM_HANDLERS}; use crate::prelude::*; use crate::{BinaryReader, BinaryReaderError, FromReader, Result, ValType}; diff --git a/crates/wasmparser/src/validator/core.rs b/crates/wasmparser/src/validator/core.rs index b6fb1d0f5d..49e38b82cb 100644 --- a/crates/wasmparser/src/validator/core.rs +++ b/crates/wasmparser/src/validator/core.rs @@ -536,7 +536,7 @@ impl ModuleState { #[cfg(feature = "simd")] impl<'a> VisitSimdOperator<'a> for VisitConstOperator<'a> { - crate::for_each_simd_operator!(define_visit_operator); + for_each_simd_operator!(define_visit_operator); } } } diff --git a/crates/wasmparser/src/validator/operators.rs b/crates/wasmparser/src/validator/operators.rs index 92a6cb70a6..eae075bb93 100644 --- a/crates/wasmparser/src/validator/operators.rs +++ b/crates/wasmparser/src/validator/operators.rs @@ -1768,7 +1768,7 @@ impl<'a, T> VisitSimdOperator<'a> for WasmProposalValidator<'_, '_, T> where T: WasmModuleResources, { - crate::for_each_simd_operator!(validate_proposal); + for_each_simd_operator!(validate_proposal); } #[track_caller] From 6b9a8831603323a170fdfc65c33dfb21d6aadd55 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Fri, 22 Nov 2024 01:10:49 +0100 Subject: [PATCH 55/83] fix bugs in generator macros --- crates/wasmparser/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/wasmparser/src/lib.rs b/crates/wasmparser/src/lib.rs index af7439380a..6dc3a3fccf 100644 --- a/crates/wasmparser/src/lib.rs +++ b/crates/wasmparser/src/lib.rs @@ -912,7 +912,7 @@ macro_rules! define_for_each_non_simd_operator { define_for_each_non_simd_operator!( filter [ $($t)* - @simd $op $({ $($arg: $argty),* })? => $visit ($($ann)*) + @$proposal $op $({ $($arg: $argty),* })? => $visit ($($ann)*) ] $($rest)* ); @@ -973,7 +973,7 @@ macro_rules! 
define_for_each_simd_operator { define_for_each_simd_operator!( filter [ $($t)* - @simd $op $({ $($arg: $argty),* })? => $visit ($($ann)*) + @relaxed_simd $op $({ $($arg: $argty),* })? => $visit ($($ann)*) ] $($rest)* ); From 91ddc751494e19f8af7d2fd80dee59acd3029091 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Sat, 23 Nov 2024 15:09:46 +0100 Subject: [PATCH 56/83] improve compile times for macros --- crates/wasmparser/src/lib.rs | 92 ++++++++++++++++++------------------ 1 file changed, 46 insertions(+), 46 deletions(-) diff --git a/crates/wasmparser/src/lib.rs b/crates/wasmparser/src/lib.rs index 6dc3a3fccf..8a7878046b 100644 --- a/crates/wasmparser/src/lib.rs +++ b/crates/wasmparser/src/lib.rs @@ -736,6 +736,31 @@ macro_rules! _for_each_operator { macro_rules! define_for_each_non_simd_operator { (@ $($t:tt)*) => {define_for_each_non_simd_operator!(filter [] @ $($t)*);}; + ( + filter [$($t:tt)*] + @simd $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) $($rest:tt)* + ) => { + define_for_each_non_simd_operator!(filter [$($t)*] $($rest)*); + }; + ( + filter [$($t:tt)*] + @relaxed_simd $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) $($rest:tt)* + ) => { + define_for_each_non_simd_operator!(filter [$($t)*] $($rest)*); + }; + ( + filter [$($t:tt)*] + @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) $($rest:tt)* + ) => { + define_for_each_non_simd_operator!( + filter [ + $($t)* + @$proposal $op $({ $($arg: $argty),* })? => $visit ($($ann)*) + ] + $($rest)* + ); + }; + (filter [$($t:tt)*]) => { /// A helper macro to conveniently iterate over all opcodes recognized by this /// crate. This can be used to work with either the [`Operator`] enumeration or @@ -892,37 +917,43 @@ macro_rules! define_for_each_non_simd_operator { } } }; +} +_for_each_operator!(define_for_each_non_simd_operator); + +#[cfg(feature = "simd")] +macro_rules! 
define_for_each_simd_operator { + (@ $($t:tt)*) => {define_for_each_simd_operator!(filter [] @ $($t)*);}; ( filter [$($t:tt)*] @simd $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) $($rest:tt)* ) => { - define_for_each_non_simd_operator!(filter [$($t)*] $($rest)*); + define_for_each_simd_operator!( + filter [ + $($t)* + @simd $op $({ $($arg: $argty),* })? => $visit ($($ann)*) + ] + $($rest)* + ); }; ( filter [$($t:tt)*] @relaxed_simd $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) $($rest:tt)* ) => { - define_for_each_non_simd_operator!(filter [$($t)*] $($rest)*); - }; - ( - filter [$($t:tt)*] - @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) $($rest:tt)* - ) => { - define_for_each_non_simd_operator!( + define_for_each_simd_operator!( filter [ $($t)* - @$proposal $op $({ $($arg: $argty),* })? => $visit ($($ann)*) + @relaxed_simd $op $({ $($arg: $argty),* })? => $visit ($($ann)*) ] $($rest)* ); }; -} -_for_each_operator!(define_for_each_non_simd_operator); - -#[cfg(feature = "simd")] -macro_rules! define_for_each_simd_operator { - (@ $($t:tt)*) => {define_for_each_simd_operator!(filter [] @ $($t)*);}; + ( + filter [$($t:tt)*] + @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) $($rest:tt)* + ) => { + define_for_each_simd_operator!(filter [$($t)*] $($rest)*); + }; (filter [$($t:tt)*]) => { /// A helper macro to conveniently iterate over all opcodes recognized by this @@ -953,37 +984,6 @@ macro_rules! define_for_each_simd_operator { } } }; - - ( - filter [$($t:tt)*] - @simd $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) $($rest:tt)* - ) => { - define_for_each_simd_operator!( - filter [ - $($t)* - @simd $op $({ $($arg: $argty),* })? => $visit ($($ann)*) - ] - $($rest)* - ); - }; - ( - filter [$($t:tt)*] - @relaxed_simd $op:ident $({ $($arg:ident: $argty:ty),* })? 
=> $visit:ident ($($ann:tt)*) $($rest:tt)* - ) => { - define_for_each_simd_operator!( - filter [ - $($t)* - @relaxed_simd $op $({ $($arg: $argty),* })? => $visit ($($ann)*) - ] - $($rest)* - ); - }; - ( - filter [$($t:tt)*] - @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) $($rest:tt)* - ) => { - define_for_each_simd_operator!(filter [$($t)*] $($rest)*); - }; } #[cfg(feature = "simd")] _for_each_operator!(define_for_each_simd_operator); From 821b34d85de9906a24f58e13e2b87b0d23463403 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Mon, 25 Nov 2024 13:56:49 +0100 Subject: [PATCH 57/83] group macro operators together --- crates/wasmparser/src/lib.rs | 39 ++++++++++++++++++++---------------- 1 file changed, 22 insertions(+), 17 deletions(-) diff --git a/crates/wasmparser/src/lib.rs b/crates/wasmparser/src/lib.rs index 8a7878046b..0a8011c244 100644 --- a/crates/wasmparser/src/lib.rs +++ b/crates/wasmparser/src/lib.rs @@ -67,15 +67,6 @@ macro_rules! 
_for_each_operator { @mvp Loop { blockty: $crate::BlockType } => visit_loop (arity block -> ~block) @mvp If { blockty: $crate::BlockType } => visit_if (arity 1 block -> ~block) @mvp Else => visit_else (arity ~end -> ~end) - @exceptions TryTable { try_table: $crate::TryTable } => visit_try_table (arity try_table -> ~try_table) - @exceptions Throw { tag_index: u32 } => visit_throw (arity tag -> 0) - @exceptions ThrowRef => visit_throw_ref (arity 1 -> 0) - // Deprecated old instructions from the exceptions proposal - @legacy_exceptions Try { blockty: $crate::BlockType } => visit_try (arity block -> ~block) - @legacy_exceptions Catch { tag_index: u32 } => visit_catch (arity ~end -> ~tag) - @legacy_exceptions Rethrow { relative_depth: u32 } => visit_rethrow (arity 0 -> 0) - @legacy_exceptions Delegate { relative_depth: u32 } => visit_delegate (arity ~end -> end) - @legacy_exceptions CatchAll => visit_catch_all (arity ~end -> 0) @mvp End => visit_end (arity implicit_else ~end -> implicit_else end) @mvp Br { relative_depth: u32 } => visit_br (arity br -> 0) @mvp BrIf { relative_depth: u32 } => visit_br_if (arity 1 br -> br) @@ -83,11 +74,8 @@ macro_rules! 
_for_each_operator { @mvp Return => visit_return (arity ~ret -> 0) @mvp Call { function_index: u32 } => visit_call (arity func -> func) @mvp CallIndirect { type_index: u32, table_index: u32 } => visit_call_indirect (arity 1 type -> type) - @tail_call ReturnCall { function_index: u32 } => visit_return_call (arity func -> 0) - @tail_call ReturnCallIndirect { type_index: u32, table_index: u32 } => visit_return_call_indirect (arity 1 type -> 0) @mvp Drop => visit_drop (arity 1 -> 0) @mvp Select => visit_select (arity 3 -> 1) - @reference_types TypedSelect { ty: $crate::ValType } => visit_typed_select (arity 3 -> 1) @mvp LocalGet { local_index: u32 } => visit_local_get (arity 0 -> 1) @mvp LocalSet { local_index: u32 } => visit_local_set (arity 1 -> 0) @mvp LocalTee { local_index: u32 } => visit_local_tee (arity 1 -> 1) @@ -122,10 +110,6 @@ macro_rules! _for_each_operator { @mvp I64Const { value: i64 } => visit_i64_const (push i64) @mvp F32Const { value: $crate::Ieee32 } => visit_f32_const (push f32) @mvp F64Const { value: $crate::Ieee64 } => visit_f64_const (push f64) - @reference_types RefNull { hty: $crate::HeapType } => visit_ref_null (arity 0 -> 1) - @reference_types RefIsNull => visit_ref_is_null (arity 1 -> 1) - @reference_types RefFunc { function_index: u32 } => visit_ref_func (arity 0 -> 1) - @gc RefEq => visit_ref_eq (arity 2 -> 1) @mvp I32Eqz => visit_i32_eqz (test i32) @mvp I32Eq => visit_i32_eq (cmp i32) @mvp I32Ne => visit_i32_ne (cmp i32) @@ -249,15 +233,17 @@ macro_rules! 
_for_each_operator { @mvp I64ReinterpretF64 => visit_i64_reinterpret_f64 (conversion i64 f64) @mvp F32ReinterpretI32 => visit_f32_reinterpret_i32 (conversion f32 i32) @mvp F64ReinterpretI64 => visit_f64_reinterpret_i64 (conversion f64 i64) + @sign_extension I32Extend8S => visit_i32_extend8_s (unary i32) @sign_extension I32Extend16S => visit_i32_extend16_s (unary i32) @sign_extension I64Extend8S => visit_i64_extend8_s (unary i64) @sign_extension I64Extend16S => visit_i64_extend16_s (unary i64) @sign_extension I64Extend32S => visit_i64_extend32_s (unary i64) - + // 0xFB prefixed operators // Garbage Collection // http://github.com/WebAssembly/gc + @gc RefEq => visit_ref_eq (arity 2 -> 1) @gc StructNew { struct_type_index: u32 } => visit_struct_new (arity type -> 1) @gc StructNewDefault { struct_type_index: u32 } => visit_struct_new_default (arity 0 -> 1) @gc StructGet { struct_type_index: u32, field_index: u32 } => visit_struct_get (arity 1 -> 1) @@ -324,12 +310,21 @@ macro_rules! _for_each_operator { // 0xFC prefixed operators // reference-types // https://github.com/WebAssembly/reference-types + @reference_types TypedSelect { ty: $crate::ValType } => visit_typed_select (arity 3 -> 1) + @reference_types RefNull { hty: $crate::HeapType } => visit_ref_null (arity 0 -> 1) + @reference_types RefIsNull => visit_ref_is_null (arity 1 -> 1) + @reference_types RefFunc { function_index: u32 } => visit_ref_func (arity 0 -> 1) @reference_types TableFill { table: u32 } => visit_table_fill (arity 3 -> 0) @reference_types TableGet { table: u32 } => visit_table_get (arity 1 -> 1) @reference_types TableSet { table: u32 } => visit_table_set (arity 2 -> 0) @reference_types TableGrow { table: u32 } => visit_table_grow (arity 2 -> 1) @reference_types TableSize { table: u32 } => visit_table_size (arity 0 -> 1) + // Wasm tail-call proposal + // https://github.com/WebAssembly/tail-call + @tail_call ReturnCall { function_index: u32 } => visit_return_call (arity func -> 0) + @tail_call 
ReturnCallIndirect { type_index: u32, table_index: u32 } => visit_return_call_indirect (arity 1 type -> 0) + // OxFC prefixed operators // memory control (experimental) // https://github.com/WebAssembly/design/issues/1439 @@ -670,6 +665,16 @@ macro_rules! _for_each_operator { @relaxed_simd I16x8RelaxedDotI8x16I7x16S => visit_i16x8_relaxed_dot_i8x16_i7x16_s (binary v128) @relaxed_simd I32x4RelaxedDotI8x16I7x16AddS => visit_i32x4_relaxed_dot_i8x16_i7x16_add_s (ternary v128) + @exceptions TryTable { try_table: $crate::TryTable } => visit_try_table (arity try_table -> ~try_table) + @exceptions Throw { tag_index: u32 } => visit_throw (arity tag -> 0) + @exceptions ThrowRef => visit_throw_ref (arity 1 -> 0) + // Deprecated old instructions from the exceptions proposal + @legacy_exceptions Try { blockty: $crate::BlockType } => visit_try (arity block -> ~block) + @legacy_exceptions Catch { tag_index: u32 } => visit_catch (arity ~end -> ~tag) + @legacy_exceptions Rethrow { relative_depth: u32 } => visit_rethrow (arity 0 -> 0) + @legacy_exceptions Delegate { relative_depth: u32 } => visit_delegate (arity ~end -> end) + @legacy_exceptions CatchAll => visit_catch_all (arity ~end -> 0) + // Also 0xFE prefixed operators // shared-everything threads // https://github.com/WebAssembly/shared-everything-threads From da653b60775e672724ac81527c3e91fdbaf4d1cd Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Mon, 25 Nov 2024 14:19:05 +0100 Subject: [PATCH 58/83] restructure for_each_operator macros This improves compile times performance significantly. --- crates/wasmparser/src/lib.rs | 1323 ++++++++++++++++++---------------- 1 file changed, 687 insertions(+), 636 deletions(-) diff --git a/crates/wasmparser/src/lib.rs b/crates/wasmparser/src/lib.rs index 0a8011c244..82b11f9eb5 100644 --- a/crates/wasmparser/src/lib.rs +++ b/crates/wasmparser/src/lib.rs @@ -26,7 +26,6 @@ //! To get started, create a [`Parser`] using [`Parser::new`] and then follow //! 
the examples documented for [`Parser::parse`] or [`Parser::parse_all`]. -#![recursion_limit = "1024"] #![deny(missing_docs)] #![no_std] #![cfg_attr(docsrs, feature(doc_auto_cfg))] @@ -61,679 +60,713 @@ mod prelude { macro_rules! _for_each_operator { ($mac:ident) => { $mac! { - @mvp Unreachable => visit_unreachable (arity 0 -> 0) - @mvp Nop => visit_nop (arity 0 -> 0) - @mvp Block { blockty: $crate::BlockType } => visit_block (arity block -> ~block) - @mvp Loop { blockty: $crate::BlockType } => visit_loop (arity block -> ~block) - @mvp If { blockty: $crate::BlockType } => visit_if (arity 1 block -> ~block) - @mvp Else => visit_else (arity ~end -> ~end) - @mvp End => visit_end (arity implicit_else ~end -> implicit_else end) - @mvp Br { relative_depth: u32 } => visit_br (arity br -> 0) - @mvp BrIf { relative_depth: u32 } => visit_br_if (arity 1 br -> br) - @mvp BrTable { targets: $crate::BrTable<'a> } => visit_br_table (arity 1 br_table -> 0) - @mvp Return => visit_return (arity ~ret -> 0) - @mvp Call { function_index: u32 } => visit_call (arity func -> func) - @mvp CallIndirect { type_index: u32, table_index: u32 } => visit_call_indirect (arity 1 type -> type) - @mvp Drop => visit_drop (arity 1 -> 0) - @mvp Select => visit_select (arity 3 -> 1) - @mvp LocalGet { local_index: u32 } => visit_local_get (arity 0 -> 1) - @mvp LocalSet { local_index: u32 } => visit_local_set (arity 1 -> 0) - @mvp LocalTee { local_index: u32 } => visit_local_tee (arity 1 -> 1) - @mvp GlobalGet { global_index: u32 } => visit_global_get (arity 0 -> 1) - @mvp GlobalSet { global_index: u32 } => visit_global_set (arity 1 -> 0) - @mvp I32Load { memarg: $crate::MemArg } => visit_i32_load (load i32) - @mvp I64Load { memarg: $crate::MemArg } => visit_i64_load (load i64) - @mvp F32Load { memarg: $crate::MemArg } => visit_f32_load (load f32) - @mvp F64Load { memarg: $crate::MemArg } => visit_f64_load (load f64) - @mvp I32Load8S { memarg: $crate::MemArg } => visit_i32_load8_s (load i32) - @mvp 
I32Load8U { memarg: $crate::MemArg } => visit_i32_load8_u (load i32) - @mvp I32Load16S { memarg: $crate::MemArg } => visit_i32_load16_s (load i32) - @mvp I32Load16U { memarg: $crate::MemArg } => visit_i32_load16_u (load i32) - @mvp I64Load8S { memarg: $crate::MemArg } => visit_i64_load8_s (load i64) - @mvp I64Load8U { memarg: $crate::MemArg } => visit_i64_load8_u (load i64) - @mvp I64Load16S { memarg: $crate::MemArg } => visit_i64_load16_s (load i64) - @mvp I64Load16U { memarg: $crate::MemArg } => visit_i64_load16_u (load i64) - @mvp I64Load32S { memarg: $crate::MemArg } => visit_i64_load32_s (load i64) - @mvp I64Load32U { memarg: $crate::MemArg } => visit_i64_load32_u (load i64) - @mvp I32Store { memarg: $crate::MemArg } => visit_i32_store (store i32) - @mvp I64Store { memarg: $crate::MemArg } => visit_i64_store (store i64) - @mvp F32Store { memarg: $crate::MemArg } => visit_f32_store (store f32) - @mvp F64Store { memarg: $crate::MemArg } => visit_f64_store (store f64) - @mvp I32Store8 { memarg: $crate::MemArg } => visit_i32_store8 (store i32) - @mvp I32Store16 { memarg: $crate::MemArg } => visit_i32_store16 (store i32) - @mvp I64Store8 { memarg: $crate::MemArg } => visit_i64_store8 (store i64) - @mvp I64Store16 { memarg: $crate::MemArg } => visit_i64_store16 (store i64) - @mvp I64Store32 { memarg: $crate::MemArg } => visit_i64_store32 (store i64) - @mvp MemorySize { mem: u32 } => visit_memory_size (arity 0 -> 1) - @mvp MemoryGrow { mem: u32 } => visit_memory_grow (arity 1 -> 1) - @mvp I32Const { value: i32 } => visit_i32_const (push i32) - @mvp I64Const { value: i64 } => visit_i64_const (push i64) - @mvp F32Const { value: $crate::Ieee32 } => visit_f32_const (push f32) - @mvp F64Const { value: $crate::Ieee64 } => visit_f64_const (push f64) - @mvp I32Eqz => visit_i32_eqz (test i32) - @mvp I32Eq => visit_i32_eq (cmp i32) - @mvp I32Ne => visit_i32_ne (cmp i32) - @mvp I32LtS => visit_i32_lt_s (cmp i32) - @mvp I32LtU => visit_i32_lt_u (cmp i32) - @mvp I32GtS => 
visit_i32_gt_s (cmp i32) - @mvp I32GtU => visit_i32_gt_u (cmp i32) - @mvp I32LeS => visit_i32_le_s (cmp i32) - @mvp I32LeU => visit_i32_le_u (cmp i32) - @mvp I32GeS => visit_i32_ge_s (cmp i32) - @mvp I32GeU => visit_i32_ge_u (cmp i32) - @mvp I64Eqz => visit_i64_eqz (test i64) - @mvp I64Eq => visit_i64_eq (cmp i64) - @mvp I64Ne => visit_i64_ne (cmp i64) - @mvp I64LtS => visit_i64_lt_s (cmp i64) - @mvp I64LtU => visit_i64_lt_u (cmp i64) - @mvp I64GtS => visit_i64_gt_s (cmp i64) - @mvp I64GtU => visit_i64_gt_u (cmp i64) - @mvp I64LeS => visit_i64_le_s (cmp i64) - @mvp I64LeU => visit_i64_le_u (cmp i64) - @mvp I64GeS => visit_i64_ge_s (cmp i64) - @mvp I64GeU => visit_i64_ge_u (cmp i64) - @mvp F32Eq => visit_f32_eq (cmp f32) - @mvp F32Ne => visit_f32_ne (cmp f32) - @mvp F32Lt => visit_f32_lt (cmp f32) - @mvp F32Gt => visit_f32_gt (cmp f32) - @mvp F32Le => visit_f32_le (cmp f32) - @mvp F32Ge => visit_f32_ge (cmp f32) - @mvp F64Eq => visit_f64_eq (cmp f64) - @mvp F64Ne => visit_f64_ne (cmp f64) - @mvp F64Lt => visit_f64_lt (cmp f64) - @mvp F64Gt => visit_f64_gt (cmp f64) - @mvp F64Le => visit_f64_le (cmp f64) - @mvp F64Ge => visit_f64_ge (cmp f64) - @mvp I32Clz => visit_i32_clz (unary i32) - @mvp I32Ctz => visit_i32_ctz (unary i32) - @mvp I32Popcnt => visit_i32_popcnt (unary i32) - @mvp I32Add => visit_i32_add (binary i32) - @mvp I32Sub => visit_i32_sub (binary i32) - @mvp I32Mul => visit_i32_mul (binary i32) - @mvp I32DivS => visit_i32_div_s (binary i32) - @mvp I32DivU => visit_i32_div_u (binary i32) - @mvp I32RemS => visit_i32_rem_s (binary i32) - @mvp I32RemU => visit_i32_rem_u (binary i32) - @mvp I32And => visit_i32_and (binary i32) - @mvp I32Or => visit_i32_or (binary i32) - @mvp I32Xor => visit_i32_xor (binary i32) - @mvp I32Shl => visit_i32_shl (binary i32) - @mvp I32ShrS => visit_i32_shr_s (binary i32) - @mvp I32ShrU => visit_i32_shr_u (binary i32) - @mvp I32Rotl => visit_i32_rotl (binary i32) - @mvp I32Rotr => visit_i32_rotr (binary i32) - @mvp I64Clz => 
visit_i64_clz (unary i64) - @mvp I64Ctz => visit_i64_ctz (unary i64) - @mvp I64Popcnt => visit_i64_popcnt (unary i64) - @mvp I64Add => visit_i64_add (binary i64) - @mvp I64Sub => visit_i64_sub (binary i64) - @mvp I64Mul => visit_i64_mul (binary i64) - @mvp I64DivS => visit_i64_div_s (binary i64) - @mvp I64DivU => visit_i64_div_u (binary i64) - @mvp I64RemS => visit_i64_rem_s (binary i64) - @mvp I64RemU => visit_i64_rem_u (binary i64) - @mvp I64And => visit_i64_and (binary i64) - @mvp I64Or => visit_i64_or (binary i64) - @mvp I64Xor => visit_i64_xor (binary i64) - @mvp I64Shl => visit_i64_shl (binary i64) - @mvp I64ShrS => visit_i64_shr_s (binary i64) - @mvp I64ShrU => visit_i64_shr_u (binary i64) - @mvp I64Rotl => visit_i64_rotl (binary i64) - @mvp I64Rotr => visit_i64_rotr (binary i64) - @mvp F32Abs => visit_f32_abs (unary f32) - @mvp F32Neg => visit_f32_neg (unary f32) - @mvp F32Ceil => visit_f32_ceil (unary f32) - @mvp F32Floor => visit_f32_floor (unary f32) - @mvp F32Trunc => visit_f32_trunc (unary f32) - @mvp F32Nearest => visit_f32_nearest (unary f32) - @mvp F32Sqrt => visit_f32_sqrt (unary f32) - @mvp F32Add => visit_f32_add (binary f32) - @mvp F32Sub => visit_f32_sub (binary f32) - @mvp F32Mul => visit_f32_mul (binary f32) - @mvp F32Div => visit_f32_div (binary f32) - @mvp F32Min => visit_f32_min (binary f32) - @mvp F32Max => visit_f32_max (binary f32) - @mvp F32Copysign => visit_f32_copysign (binary f32) - @mvp F64Abs => visit_f64_abs (unary f64) - @mvp F64Neg => visit_f64_neg (unary f64) - @mvp F64Ceil => visit_f64_ceil (unary f64) - @mvp F64Floor => visit_f64_floor (unary f64) - @mvp F64Trunc => visit_f64_trunc (unary f64) - @mvp F64Nearest => visit_f64_nearest (unary f64) - @mvp F64Sqrt => visit_f64_sqrt (unary f64) - @mvp F64Add => visit_f64_add (binary f64) - @mvp F64Sub => visit_f64_sub (binary f64) - @mvp F64Mul => visit_f64_mul (binary f64) - @mvp F64Div => visit_f64_div (binary f64) - @mvp F64Min => visit_f64_min (binary f64) - @mvp F64Max => 
visit_f64_max (binary f64) - @mvp F64Copysign => visit_f64_copysign (binary f64) - @mvp I32WrapI64 => visit_i32_wrap_i64 (conversion i32 i64) - @mvp I32TruncF32S => visit_i32_trunc_f32_s (conversion i32 f32) - @mvp I32TruncF32U => visit_i32_trunc_f32_u (conversion i32 f32) - @mvp I32TruncF64S => visit_i32_trunc_f64_s (conversion i32 f64) - @mvp I32TruncF64U => visit_i32_trunc_f64_u (conversion i32 f64) - @mvp I64ExtendI32S => visit_i64_extend_i32_s (conversion i64 i32) - @mvp I64ExtendI32U => visit_i64_extend_i32_u (conversion i64 i32) - @mvp I64TruncF32S => visit_i64_trunc_f32_s (conversion i64 f32) - @mvp I64TruncF32U => visit_i64_trunc_f32_u (conversion i64 f32) - @mvp I64TruncF64S => visit_i64_trunc_f64_s (conversion i64 f64) - @mvp I64TruncF64U => visit_i64_trunc_f64_u (conversion i64 f64) - @mvp F32ConvertI32S => visit_f32_convert_i32_s (conversion f32 i32) - @mvp F32ConvertI32U => visit_f32_convert_i32_u (conversion f32 i32) - @mvp F32ConvertI64S => visit_f32_convert_i64_s (conversion f32 i64) - @mvp F32ConvertI64U => visit_f32_convert_i64_u (conversion f32 i64) - @mvp F32DemoteF64 => visit_f32_demote_f64 (conversion f32 f64) - @mvp F64ConvertI32S => visit_f64_convert_i32_s (conversion f64 i32) - @mvp F64ConvertI32U => visit_f64_convert_i32_u (conversion f64 i32) - @mvp F64ConvertI64S => visit_f64_convert_i64_s (conversion f64 i64) - @mvp F64ConvertI64U => visit_f64_convert_i64_u (conversion f64 i64) - @mvp F64PromoteF32 => visit_f64_promote_f32 (conversion f64 f32) - @mvp I32ReinterpretF32 => visit_i32_reinterpret_f32 (conversion i32 f32) - @mvp I64ReinterpretF64 => visit_i64_reinterpret_f64 (conversion i64 f64) - @mvp F32ReinterpretI32 => visit_f32_reinterpret_i32 (conversion f32 i32) - @mvp F64ReinterpretI64 => visit_f64_reinterpret_i64 (conversion f64 i64) + @mvp { + Unreachable => visit_unreachable (arity 0 -> 0) + Nop => visit_nop (arity 0 -> 0) + Block { blockty: $crate::BlockType } => visit_block (arity block -> ~block) + Loop { blockty: 
$crate::BlockType } => visit_loop (arity block -> ~block) + If { blockty: $crate::BlockType } => visit_if (arity 1 block -> ~block) + Else => visit_else (arity ~end -> ~end) + End => visit_end (arity implicit_else ~end -> implicit_else end) + Br { relative_depth: u32 } => visit_br (arity br -> 0) + BrIf { relative_depth: u32 } => visit_br_if (arity 1 br -> br) + BrTable { targets: $crate::BrTable<'a> } => visit_br_table (arity 1 br_table -> 0) + Return => visit_return (arity ~ret -> 0) + Call { function_index: u32 } => visit_call (arity func -> func) + CallIndirect { type_index: u32, table_index: u32 } => visit_call_indirect (arity 1 type -> type) + Drop => visit_drop (arity 1 -> 0) + Select => visit_select (arity 3 -> 1) + LocalGet { local_index: u32 } => visit_local_get (arity 0 -> 1) + LocalSet { local_index: u32 } => visit_local_set (arity 1 -> 0) + LocalTee { local_index: u32 } => visit_local_tee (arity 1 -> 1) + GlobalGet { global_index: u32 } => visit_global_get (arity 0 -> 1) + GlobalSet { global_index: u32 } => visit_global_set (arity 1 -> 0) + I32Load { memarg: $crate::MemArg } => visit_i32_load (load i32) + I64Load { memarg: $crate::MemArg } => visit_i64_load (load i64) + F32Load { memarg: $crate::MemArg } => visit_f32_load (load f32) + F64Load { memarg: $crate::MemArg } => visit_f64_load (load f64) + I32Load8S { memarg: $crate::MemArg } => visit_i32_load8_s (load i32) + I32Load8U { memarg: $crate::MemArg } => visit_i32_load8_u (load i32) + I32Load16S { memarg: $crate::MemArg } => visit_i32_load16_s (load i32) + I32Load16U { memarg: $crate::MemArg } => visit_i32_load16_u (load i32) + I64Load8S { memarg: $crate::MemArg } => visit_i64_load8_s (load i64) + I64Load8U { memarg: $crate::MemArg } => visit_i64_load8_u (load i64) + I64Load16S { memarg: $crate::MemArg } => visit_i64_load16_s (load i64) + I64Load16U { memarg: $crate::MemArg } => visit_i64_load16_u (load i64) + I64Load32S { memarg: $crate::MemArg } => visit_i64_load32_s (load i64) + I64Load32U { 
memarg: $crate::MemArg } => visit_i64_load32_u (load i64) + I32Store { memarg: $crate::MemArg } => visit_i32_store (store i32) + I64Store { memarg: $crate::MemArg } => visit_i64_store (store i64) + F32Store { memarg: $crate::MemArg } => visit_f32_store (store f32) + F64Store { memarg: $crate::MemArg } => visit_f64_store (store f64) + I32Store8 { memarg: $crate::MemArg } => visit_i32_store8 (store i32) + I32Store16 { memarg: $crate::MemArg } => visit_i32_store16 (store i32) + I64Store8 { memarg: $crate::MemArg } => visit_i64_store8 (store i64) + I64Store16 { memarg: $crate::MemArg } => visit_i64_store16 (store i64) + I64Store32 { memarg: $crate::MemArg } => visit_i64_store32 (store i64) + MemorySize { mem: u32 } => visit_memory_size (arity 0 -> 1) + MemoryGrow { mem: u32 } => visit_memory_grow (arity 1 -> 1) + I32Const { value: i32 } => visit_i32_const (push i32) + I64Const { value: i64 } => visit_i64_const (push i64) + F32Const { value: $crate::Ieee32 } => visit_f32_const (push f32) + F64Const { value: $crate::Ieee64 } => visit_f64_const (push f64) + I32Eqz => visit_i32_eqz (test i32) + I32Eq => visit_i32_eq (cmp i32) + I32Ne => visit_i32_ne (cmp i32) + I32LtS => visit_i32_lt_s (cmp i32) + I32LtU => visit_i32_lt_u (cmp i32) + I32GtS => visit_i32_gt_s (cmp i32) + I32GtU => visit_i32_gt_u (cmp i32) + I32LeS => visit_i32_le_s (cmp i32) + I32LeU => visit_i32_le_u (cmp i32) + I32GeS => visit_i32_ge_s (cmp i32) + I32GeU => visit_i32_ge_u (cmp i32) + I64Eqz => visit_i64_eqz (test i64) + I64Eq => visit_i64_eq (cmp i64) + I64Ne => visit_i64_ne (cmp i64) + I64LtS => visit_i64_lt_s (cmp i64) + I64LtU => visit_i64_lt_u (cmp i64) + I64GtS => visit_i64_gt_s (cmp i64) + I64GtU => visit_i64_gt_u (cmp i64) + I64LeS => visit_i64_le_s (cmp i64) + I64LeU => visit_i64_le_u (cmp i64) + I64GeS => visit_i64_ge_s (cmp i64) + I64GeU => visit_i64_ge_u (cmp i64) + F32Eq => visit_f32_eq (cmp f32) + F32Ne => visit_f32_ne (cmp f32) + F32Lt => visit_f32_lt (cmp f32) + F32Gt => visit_f32_gt (cmp 
f32) + F32Le => visit_f32_le (cmp f32) + F32Ge => visit_f32_ge (cmp f32) + F64Eq => visit_f64_eq (cmp f64) + F64Ne => visit_f64_ne (cmp f64) + F64Lt => visit_f64_lt (cmp f64) + F64Gt => visit_f64_gt (cmp f64) + F64Le => visit_f64_le (cmp f64) + F64Ge => visit_f64_ge (cmp f64) + I32Clz => visit_i32_clz (unary i32) + I32Ctz => visit_i32_ctz (unary i32) + I32Popcnt => visit_i32_popcnt (unary i32) + I32Add => visit_i32_add (binary i32) + I32Sub => visit_i32_sub (binary i32) + I32Mul => visit_i32_mul (binary i32) + I32DivS => visit_i32_div_s (binary i32) + I32DivU => visit_i32_div_u (binary i32) + I32RemS => visit_i32_rem_s (binary i32) + I32RemU => visit_i32_rem_u (binary i32) + I32And => visit_i32_and (binary i32) + I32Or => visit_i32_or (binary i32) + I32Xor => visit_i32_xor (binary i32) + I32Shl => visit_i32_shl (binary i32) + I32ShrS => visit_i32_shr_s (binary i32) + I32ShrU => visit_i32_shr_u (binary i32) + I32Rotl => visit_i32_rotl (binary i32) + I32Rotr => visit_i32_rotr (binary i32) + I64Clz => visit_i64_clz (unary i64) + I64Ctz => visit_i64_ctz (unary i64) + I64Popcnt => visit_i64_popcnt (unary i64) + I64Add => visit_i64_add (binary i64) + I64Sub => visit_i64_sub (binary i64) + I64Mul => visit_i64_mul (binary i64) + I64DivS => visit_i64_div_s (binary i64) + I64DivU => visit_i64_div_u (binary i64) + I64RemS => visit_i64_rem_s (binary i64) + I64RemU => visit_i64_rem_u (binary i64) + I64And => visit_i64_and (binary i64) + I64Or => visit_i64_or (binary i64) + I64Xor => visit_i64_xor (binary i64) + I64Shl => visit_i64_shl (binary i64) + I64ShrS => visit_i64_shr_s (binary i64) + I64ShrU => visit_i64_shr_u (binary i64) + I64Rotl => visit_i64_rotl (binary i64) + I64Rotr => visit_i64_rotr (binary i64) + F32Abs => visit_f32_abs (unary f32) + F32Neg => visit_f32_neg (unary f32) + F32Ceil => visit_f32_ceil (unary f32) + F32Floor => visit_f32_floor (unary f32) + F32Trunc => visit_f32_trunc (unary f32) + F32Nearest => visit_f32_nearest (unary f32) + F32Sqrt => 
visit_f32_sqrt (unary f32) + F32Add => visit_f32_add (binary f32) + F32Sub => visit_f32_sub (binary f32) + F32Mul => visit_f32_mul (binary f32) + F32Div => visit_f32_div (binary f32) + F32Min => visit_f32_min (binary f32) + F32Max => visit_f32_max (binary f32) + F32Copysign => visit_f32_copysign (binary f32) + F64Abs => visit_f64_abs (unary f64) + F64Neg => visit_f64_neg (unary f64) + F64Ceil => visit_f64_ceil (unary f64) + F64Floor => visit_f64_floor (unary f64) + F64Trunc => visit_f64_trunc (unary f64) + F64Nearest => visit_f64_nearest (unary f64) + F64Sqrt => visit_f64_sqrt (unary f64) + F64Add => visit_f64_add (binary f64) + F64Sub => visit_f64_sub (binary f64) + F64Mul => visit_f64_mul (binary f64) + F64Div => visit_f64_div (binary f64) + F64Min => visit_f64_min (binary f64) + F64Max => visit_f64_max (binary f64) + F64Copysign => visit_f64_copysign (binary f64) + I32WrapI64 => visit_i32_wrap_i64 (conversion i32 i64) + I32TruncF32S => visit_i32_trunc_f32_s (conversion i32 f32) + I32TruncF32U => visit_i32_trunc_f32_u (conversion i32 f32) + I32TruncF64S => visit_i32_trunc_f64_s (conversion i32 f64) + I32TruncF64U => visit_i32_trunc_f64_u (conversion i32 f64) + I64ExtendI32S => visit_i64_extend_i32_s (conversion i64 i32) + I64ExtendI32U => visit_i64_extend_i32_u (conversion i64 i32) + I64TruncF32S => visit_i64_trunc_f32_s (conversion i64 f32) + I64TruncF32U => visit_i64_trunc_f32_u (conversion i64 f32) + I64TruncF64S => visit_i64_trunc_f64_s (conversion i64 f64) + I64TruncF64U => visit_i64_trunc_f64_u (conversion i64 f64) + F32ConvertI32S => visit_f32_convert_i32_s (conversion f32 i32) + F32ConvertI32U => visit_f32_convert_i32_u (conversion f32 i32) + F32ConvertI64S => visit_f32_convert_i64_s (conversion f32 i64) + F32ConvertI64U => visit_f32_convert_i64_u (conversion f32 i64) + F32DemoteF64 => visit_f32_demote_f64 (conversion f32 f64) + F64ConvertI32S => visit_f64_convert_i32_s (conversion f64 i32) + F64ConvertI32U => visit_f64_convert_i32_u (conversion f64 i32) 
+ F64ConvertI64S => visit_f64_convert_i64_s (conversion f64 i64) + F64ConvertI64U => visit_f64_convert_i64_u (conversion f64 i64) + F64PromoteF32 => visit_f64_promote_f32 (conversion f64 f32) + I32ReinterpretF32 => visit_i32_reinterpret_f32 (conversion i32 f32) + I64ReinterpretF64 => visit_i64_reinterpret_f64 (conversion i64 f64) + F32ReinterpretI32 => visit_f32_reinterpret_i32 (conversion f32 i32) + F64ReinterpretI64 => visit_f64_reinterpret_i64 (conversion f64 i64) + } - @sign_extension I32Extend8S => visit_i32_extend8_s (unary i32) - @sign_extension I32Extend16S => visit_i32_extend16_s (unary i32) - @sign_extension I64Extend8S => visit_i64_extend8_s (unary i64) - @sign_extension I64Extend16S => visit_i64_extend16_s (unary i64) - @sign_extension I64Extend32S => visit_i64_extend32_s (unary i64) + @sign_extension { + I32Extend8S => visit_i32_extend8_s (unary i32) + I32Extend16S => visit_i32_extend16_s (unary i32) + I64Extend8S => visit_i64_extend8_s (unary i64) + I64Extend16S => visit_i64_extend16_s (unary i64) + I64Extend32S => visit_i64_extend32_s (unary i64) + } // 0xFB prefixed operators // Garbage Collection // http://github.com/WebAssembly/gc - @gc RefEq => visit_ref_eq (arity 2 -> 1) - @gc StructNew { struct_type_index: u32 } => visit_struct_new (arity type -> 1) - @gc StructNewDefault { struct_type_index: u32 } => visit_struct_new_default (arity 0 -> 1) - @gc StructGet { struct_type_index: u32, field_index: u32 } => visit_struct_get (arity 1 -> 1) - @gc StructGetS { struct_type_index: u32, field_index: u32 } => visit_struct_get_s (arity 1 -> 1) - @gc StructGetU { struct_type_index: u32, field_index: u32 } => visit_struct_get_u (arity 1 -> 1) - @gc StructSet { struct_type_index: u32, field_index: u32 } => visit_struct_set (arity 2 -> 0) - @gc ArrayNew { array_type_index: u32 } => visit_array_new (arity 2 -> 1) - @gc ArrayNewDefault { array_type_index: u32 } => visit_array_new_default (arity 1 -> 1) - @gc ArrayNewFixed { array_type_index: u32, array_size: u32 
} => visit_array_new_fixed (arity size -> 1) - @gc ArrayNewData { array_type_index: u32, array_data_index: u32 } => visit_array_new_data (arity 2 -> 1) - @gc ArrayNewElem { array_type_index: u32, array_elem_index: u32 } => visit_array_new_elem (arity 2 -> 1) - @gc ArrayGet { array_type_index: u32 } => visit_array_get (arity 2 -> 1) - @gc ArrayGetS { array_type_index: u32 } => visit_array_get_s (arity 2 -> 1) - @gc ArrayGetU { array_type_index: u32 } => visit_array_get_u (arity 2 -> 1) - @gc ArraySet { array_type_index: u32 } => visit_array_set (arity 3 -> 0) - @gc ArrayLen => visit_array_len (arity 1 -> 1) - @gc ArrayFill { array_type_index: u32 } => visit_array_fill (arity 4 -> 0) - @gc ArrayCopy { array_type_index_dst: u32, array_type_index_src: u32 } => visit_array_copy (arity 5 -> 0) - @gc ArrayInitData { array_type_index: u32, array_data_index: u32 } => visit_array_init_data (arity 4 -> 0) - @gc ArrayInitElem { array_type_index: u32, array_elem_index: u32 } => visit_array_init_elem (arity 4 -> 0) - @gc RefTestNonNull { hty: $crate::HeapType } => visit_ref_test_non_null (arity 1 -> 1) - @gc RefTestNullable { hty: $crate::HeapType } => visit_ref_test_nullable (arity 1 -> 1) - @gc RefCastNonNull { hty: $crate::HeapType } => visit_ref_cast_non_null (arity 1 -> 1) - @gc RefCastNullable { hty: $crate::HeapType } => visit_ref_cast_nullable (arity 1 -> 1) - @gc BrOnCast { - relative_depth: u32, - from_ref_type: $crate::RefType, - to_ref_type: $crate::RefType - } => visit_br_on_cast (arity br -> br) - @gc BrOnCastFail { - relative_depth: u32, - from_ref_type: $crate::RefType, - to_ref_type: $crate::RefType - } => visit_br_on_cast_fail (arity br -> br) - @gc AnyConvertExtern => visit_any_convert_extern (arity 1 -> 1) - @gc ExternConvertAny => visit_extern_convert_any (arity 1 -> 1) - @gc RefI31 => visit_ref_i31 (arity 1 -> 1) - @gc I31GetS => visit_i31_get_s (arity 1 -> 1) - @gc I31GetU => visit_i31_get_u (arity 1 -> 1) + @gc { + RefEq => visit_ref_eq (arity 2 -> 1) + 
StructNew { struct_type_index: u32 } => visit_struct_new (arity type -> 1) + StructNewDefault { struct_type_index: u32 } => visit_struct_new_default (arity 0 -> 1) + StructGet { struct_type_index: u32, field_index: u32 } => visit_struct_get (arity 1 -> 1) + StructGetS { struct_type_index: u32, field_index: u32 } => visit_struct_get_s (arity 1 -> 1) + StructGetU { struct_type_index: u32, field_index: u32 } => visit_struct_get_u (arity 1 -> 1) + StructSet { struct_type_index: u32, field_index: u32 } => visit_struct_set (arity 2 -> 0) + ArrayNew { array_type_index: u32 } => visit_array_new (arity 2 -> 1) + ArrayNewDefault { array_type_index: u32 } => visit_array_new_default (arity 1 -> 1) + ArrayNewFixed { array_type_index: u32, array_size: u32 } => visit_array_new_fixed (arity size -> 1) + ArrayNewData { array_type_index: u32, array_data_index: u32 } => visit_array_new_data (arity 2 -> 1) + ArrayNewElem { array_type_index: u32, array_elem_index: u32 } => visit_array_new_elem (arity 2 -> 1) + ArrayGet { array_type_index: u32 } => visit_array_get (arity 2 -> 1) + ArrayGetS { array_type_index: u32 } => visit_array_get_s (arity 2 -> 1) + ArrayGetU { array_type_index: u32 } => visit_array_get_u (arity 2 -> 1) + ArraySet { array_type_index: u32 } => visit_array_set (arity 3 -> 0) + ArrayLen => visit_array_len (arity 1 -> 1) + ArrayFill { array_type_index: u32 } => visit_array_fill (arity 4 -> 0) + ArrayCopy { array_type_index_dst: u32, array_type_index_src: u32 } => visit_array_copy (arity 5 -> 0) + ArrayInitData { array_type_index: u32, array_data_index: u32 } => visit_array_init_data (arity 4 -> 0) + ArrayInitElem { array_type_index: u32, array_elem_index: u32 } => visit_array_init_elem (arity 4 -> 0) + RefTestNonNull { hty: $crate::HeapType } => visit_ref_test_non_null (arity 1 -> 1) + RefTestNullable { hty: $crate::HeapType } => visit_ref_test_nullable (arity 1 -> 1) + RefCastNonNull { hty: $crate::HeapType } => visit_ref_cast_non_null (arity 1 -> 1) + RefCastNullable 
{ hty: $crate::HeapType } => visit_ref_cast_nullable (arity 1 -> 1) + BrOnCast { + relative_depth: u32, + from_ref_type: $crate::RefType, + to_ref_type: $crate::RefType + } => visit_br_on_cast (arity br -> br) + BrOnCastFail { + relative_depth: u32, + from_ref_type: $crate::RefType, + to_ref_type: $crate::RefType + } => visit_br_on_cast_fail (arity br -> br) + AnyConvertExtern => visit_any_convert_extern (arity 1 -> 1) + ExternConvertAny => visit_extern_convert_any (arity 1 -> 1) + RefI31 => visit_ref_i31 (arity 1 -> 1) + I31GetS => visit_i31_get_s (arity 1 -> 1) + I31GetU => visit_i31_get_u (arity 1 -> 1) + } // 0xFC operators // Non-trapping Float-to-int Conversions // https://github.com/WebAssembly/nontrapping-float-to-int-conversions - @saturating_float_to_int I32TruncSatF32S => visit_i32_trunc_sat_f32_s (conversion i32 f32) - @saturating_float_to_int I32TruncSatF32U => visit_i32_trunc_sat_f32_u (conversion i32 f32) - @saturating_float_to_int I32TruncSatF64S => visit_i32_trunc_sat_f64_s (conversion i32 f64) - @saturating_float_to_int I32TruncSatF64U => visit_i32_trunc_sat_f64_u (conversion i32 f64) - @saturating_float_to_int I64TruncSatF32S => visit_i64_trunc_sat_f32_s (conversion i64 f32) - @saturating_float_to_int I64TruncSatF32U => visit_i64_trunc_sat_f32_u (conversion i64 f32) - @saturating_float_to_int I64TruncSatF64S => visit_i64_trunc_sat_f64_s (conversion i64 f64) - @saturating_float_to_int I64TruncSatF64U => visit_i64_trunc_sat_f64_u (conversion i64 f64) + @saturating_float_to_int { + I32TruncSatF32S => visit_i32_trunc_sat_f32_s (conversion i32 f32) + I32TruncSatF32U => visit_i32_trunc_sat_f32_u (conversion i32 f32) + I32TruncSatF64S => visit_i32_trunc_sat_f64_s (conversion i32 f64) + I32TruncSatF64U => visit_i32_trunc_sat_f64_u (conversion i32 f64) + I64TruncSatF32S => visit_i64_trunc_sat_f32_s (conversion i64 f32) + I64TruncSatF32U => visit_i64_trunc_sat_f32_u (conversion i64 f32) + I64TruncSatF64S => visit_i64_trunc_sat_f64_s (conversion i64 f64) + 
I64TruncSatF64U => visit_i64_trunc_sat_f64_u (conversion i64 f64) + } // 0xFC prefixed operators // bulk memory operations // https://github.com/WebAssembly/bulk-memory-operations - @bulk_memory MemoryInit { data_index: u32, mem: u32 } => visit_memory_init (arity 3 -> 0) - @bulk_memory DataDrop { data_index: u32 } => visit_data_drop (arity 0 -> 0) - @bulk_memory MemoryCopy { dst_mem: u32, src_mem: u32 } => visit_memory_copy (arity 3 -> 0) - @bulk_memory MemoryFill { mem: u32 } => visit_memory_fill (arity 3 -> 0) - @bulk_memory TableInit { elem_index: u32, table: u32 } => visit_table_init (arity 3 -> 0) - @bulk_memory ElemDrop { elem_index: u32 } => visit_elem_drop (arity 0 -> 0) - @bulk_memory TableCopy { dst_table: u32, src_table: u32 } => visit_table_copy (arity 3 -> 0) + @bulk_memory { + MemoryInit { data_index: u32, mem: u32 } => visit_memory_init (arity 3 -> 0) + DataDrop { data_index: u32 } => visit_data_drop (arity 0 -> 0) + MemoryCopy { dst_mem: u32, src_mem: u32 } => visit_memory_copy (arity 3 -> 0) + MemoryFill { mem: u32 } => visit_memory_fill (arity 3 -> 0) + TableInit { elem_index: u32, table: u32 } => visit_table_init (arity 3 -> 0) + ElemDrop { elem_index: u32 } => visit_elem_drop (arity 0 -> 0) + TableCopy { dst_table: u32, src_table: u32 } => visit_table_copy (arity 3 -> 0) + } // 0xFC prefixed operators // reference-types // https://github.com/WebAssembly/reference-types - @reference_types TypedSelect { ty: $crate::ValType } => visit_typed_select (arity 3 -> 1) - @reference_types RefNull { hty: $crate::HeapType } => visit_ref_null (arity 0 -> 1) - @reference_types RefIsNull => visit_ref_is_null (arity 1 -> 1) - @reference_types RefFunc { function_index: u32 } => visit_ref_func (arity 0 -> 1) - @reference_types TableFill { table: u32 } => visit_table_fill (arity 3 -> 0) - @reference_types TableGet { table: u32 } => visit_table_get (arity 1 -> 1) - @reference_types TableSet { table: u32 } => visit_table_set (arity 2 -> 0) - @reference_types 
TableGrow { table: u32 } => visit_table_grow (arity 2 -> 1) - @reference_types TableSize { table: u32 } => visit_table_size (arity 0 -> 1) + @reference_types { + TypedSelect { ty: $crate::ValType } => visit_typed_select (arity 3 -> 1) + RefNull { hty: $crate::HeapType } => visit_ref_null (arity 0 -> 1) + RefIsNull => visit_ref_is_null (arity 1 -> 1) + RefFunc { function_index: u32 } => visit_ref_func (arity 0 -> 1) + TableFill { table: u32 } => visit_table_fill (arity 3 -> 0) + TableGet { table: u32 } => visit_table_get (arity 1 -> 1) + TableSet { table: u32 } => visit_table_set (arity 2 -> 0) + TableGrow { table: u32 } => visit_table_grow (arity 2 -> 1) + TableSize { table: u32 } => visit_table_size (arity 0 -> 1) + } // Wasm tail-call proposal // https://github.com/WebAssembly/tail-call - @tail_call ReturnCall { function_index: u32 } => visit_return_call (arity func -> 0) - @tail_call ReturnCallIndirect { type_index: u32, table_index: u32 } => visit_return_call_indirect (arity 1 type -> 0) + @tail_call { + ReturnCall { function_index: u32 } => visit_return_call (arity func -> 0) + ReturnCallIndirect { type_index: u32, table_index: u32 } => visit_return_call_indirect (arity 1 type -> 0) + } // OxFC prefixed operators // memory control (experimental) // https://github.com/WebAssembly/design/issues/1439 - @memory_control MemoryDiscard { mem: u32 } => visit_memory_discard (arity 2 -> 0) + @memory_control { + MemoryDiscard { mem: u32 } => visit_memory_discard (arity 2 -> 0) + } // 0xFE prefixed operators // threads // https://github.com/WebAssembly/threads - @threads MemoryAtomicNotify { memarg: $crate::MemArg } => visit_memory_atomic_notify (atomic rmw i32) - @threads MemoryAtomicWait32 { memarg: $crate::MemArg } => visit_memory_atomic_wait32 (arity 3 -> 1) - @threads MemoryAtomicWait64 { memarg: $crate::MemArg } => visit_memory_atomic_wait64 (arity 3 -> 1) - @threads AtomicFence => visit_atomic_fence (arity 0 -> 0) - @threads I32AtomicLoad { memarg: $crate::MemArg } 
=> visit_i32_atomic_load (load atomic i32) - @threads I64AtomicLoad { memarg: $crate::MemArg } => visit_i64_atomic_load (load atomic i64) - @threads I32AtomicLoad8U { memarg: $crate::MemArg } => visit_i32_atomic_load8_u (load atomic i32) - @threads I32AtomicLoad16U { memarg: $crate::MemArg } => visit_i32_atomic_load16_u (load atomic i32) - @threads I64AtomicLoad8U { memarg: $crate::MemArg } => visit_i64_atomic_load8_u (load atomic i64) - @threads I64AtomicLoad16U { memarg: $crate::MemArg } => visit_i64_atomic_load16_u (load atomic i64) - @threads I64AtomicLoad32U { memarg: $crate::MemArg } => visit_i64_atomic_load32_u (load atomic i64) - @threads I32AtomicStore { memarg: $crate::MemArg } => visit_i32_atomic_store (store atomic i32) - @threads I64AtomicStore { memarg: $crate::MemArg } => visit_i64_atomic_store (store atomic i64) - @threads I32AtomicStore8 { memarg: $crate::MemArg } => visit_i32_atomic_store8 (store atomic i32) - @threads I32AtomicStore16 { memarg: $crate::MemArg } => visit_i32_atomic_store16 (store atomic i32) - @threads I64AtomicStore8 { memarg: $crate::MemArg } => visit_i64_atomic_store8 (store atomic i64) - @threads I64AtomicStore16 { memarg: $crate::MemArg } => visit_i64_atomic_store16 (store atomic i64) - @threads I64AtomicStore32 { memarg: $crate::MemArg } => visit_i64_atomic_store32 (store atomic i64) - @threads I32AtomicRmwAdd { memarg: $crate::MemArg } => visit_i32_atomic_rmw_add (atomic rmw i32) - @threads I64AtomicRmwAdd { memarg: $crate::MemArg } => visit_i64_atomic_rmw_add (atomic rmw i64) - @threads I32AtomicRmw8AddU { memarg: $crate::MemArg } => visit_i32_atomic_rmw8_add_u (atomic rmw i32) - @threads I32AtomicRmw16AddU { memarg: $crate::MemArg } => visit_i32_atomic_rmw16_add_u (atomic rmw i32) - @threads I64AtomicRmw8AddU { memarg: $crate::MemArg } => visit_i64_atomic_rmw8_add_u (atomic rmw i64) - @threads I64AtomicRmw16AddU { memarg: $crate::MemArg } => visit_i64_atomic_rmw16_add_u (atomic rmw i64) - @threads I64AtomicRmw32AddU { 
memarg: $crate::MemArg } => visit_i64_atomic_rmw32_add_u (atomic rmw i64) - @threads I32AtomicRmwSub { memarg: $crate::MemArg } => visit_i32_atomic_rmw_sub (atomic rmw i32) - @threads I64AtomicRmwSub { memarg: $crate::MemArg } => visit_i64_atomic_rmw_sub (atomic rmw i64) - @threads I32AtomicRmw8SubU { memarg: $crate::MemArg } => visit_i32_atomic_rmw8_sub_u (atomic rmw i32) - @threads I32AtomicRmw16SubU { memarg: $crate::MemArg } => visit_i32_atomic_rmw16_sub_u (atomic rmw i32) - @threads I64AtomicRmw8SubU { memarg: $crate::MemArg } => visit_i64_atomic_rmw8_sub_u (atomic rmw i64) - @threads I64AtomicRmw16SubU { memarg: $crate::MemArg } => visit_i64_atomic_rmw16_sub_u (atomic rmw i64) - @threads I64AtomicRmw32SubU { memarg: $crate::MemArg } => visit_i64_atomic_rmw32_sub_u (atomic rmw i64) - @threads I32AtomicRmwAnd { memarg: $crate::MemArg } => visit_i32_atomic_rmw_and (atomic rmw i32) - @threads I64AtomicRmwAnd { memarg: $crate::MemArg } => visit_i64_atomic_rmw_and (atomic rmw i64) - @threads I32AtomicRmw8AndU { memarg: $crate::MemArg } => visit_i32_atomic_rmw8_and_u (atomic rmw i32) - @threads I32AtomicRmw16AndU { memarg: $crate::MemArg } => visit_i32_atomic_rmw16_and_u (atomic rmw i32) - @threads I64AtomicRmw8AndU { memarg: $crate::MemArg } => visit_i64_atomic_rmw8_and_u (atomic rmw i64) - @threads I64AtomicRmw16AndU { memarg: $crate::MemArg } => visit_i64_atomic_rmw16_and_u (atomic rmw i64) - @threads I64AtomicRmw32AndU { memarg: $crate::MemArg } => visit_i64_atomic_rmw32_and_u (atomic rmw i64) - @threads I32AtomicRmwOr { memarg: $crate::MemArg } => visit_i32_atomic_rmw_or (atomic rmw i32) - @threads I64AtomicRmwOr { memarg: $crate::MemArg } => visit_i64_atomic_rmw_or (atomic rmw i64) - @threads I32AtomicRmw8OrU { memarg: $crate::MemArg } => visit_i32_atomic_rmw8_or_u (atomic rmw i32) - @threads I32AtomicRmw16OrU { memarg: $crate::MemArg } => visit_i32_atomic_rmw16_or_u (atomic rmw i32) - @threads I64AtomicRmw8OrU { memarg: $crate::MemArg } => 
visit_i64_atomic_rmw8_or_u (atomic rmw i64) - @threads I64AtomicRmw16OrU { memarg: $crate::MemArg } => visit_i64_atomic_rmw16_or_u (atomic rmw i64) - @threads I64AtomicRmw32OrU { memarg: $crate::MemArg } => visit_i64_atomic_rmw32_or_u (atomic rmw i64) - @threads I32AtomicRmwXor { memarg: $crate::MemArg } => visit_i32_atomic_rmw_xor (atomic rmw i32) - @threads I64AtomicRmwXor { memarg: $crate::MemArg } => visit_i64_atomic_rmw_xor (atomic rmw i64) - @threads I32AtomicRmw8XorU { memarg: $crate::MemArg } => visit_i32_atomic_rmw8_xor_u (atomic rmw i32) - @threads I32AtomicRmw16XorU { memarg: $crate::MemArg } => visit_i32_atomic_rmw16_xor_u (atomic rmw i32) - @threads I64AtomicRmw8XorU { memarg: $crate::MemArg } => visit_i64_atomic_rmw8_xor_u (atomic rmw i64) - @threads I64AtomicRmw16XorU { memarg: $crate::MemArg } => visit_i64_atomic_rmw16_xor_u (atomic rmw i64) - @threads I64AtomicRmw32XorU { memarg: $crate::MemArg } => visit_i64_atomic_rmw32_xor_u (atomic rmw i64) - @threads I32AtomicRmwXchg { memarg: $crate::MemArg } => visit_i32_atomic_rmw_xchg (atomic rmw i32) - @threads I64AtomicRmwXchg { memarg: $crate::MemArg } => visit_i64_atomic_rmw_xchg (atomic rmw i64) - @threads I32AtomicRmw8XchgU { memarg: $crate::MemArg } => visit_i32_atomic_rmw8_xchg_u (atomic rmw i32) - @threads I32AtomicRmw16XchgU { memarg: $crate::MemArg } => visit_i32_atomic_rmw16_xchg_u (atomic rmw i32) - @threads I64AtomicRmw8XchgU { memarg: $crate::MemArg } => visit_i64_atomic_rmw8_xchg_u (atomic rmw i64) - @threads I64AtomicRmw16XchgU { memarg: $crate::MemArg } => visit_i64_atomic_rmw16_xchg_u (atomic rmw i64) - @threads I64AtomicRmw32XchgU { memarg: $crate::MemArg } => visit_i64_atomic_rmw32_xchg_u (atomic rmw i64) - @threads I32AtomicRmwCmpxchg { memarg: $crate::MemArg } => visit_i32_atomic_rmw_cmpxchg (atomic cmpxchg i32) - @threads I64AtomicRmwCmpxchg { memarg: $crate::MemArg } => visit_i64_atomic_rmw_cmpxchg (atomic cmpxchg i64) - @threads I32AtomicRmw8CmpxchgU { memarg: $crate::MemArg } => 
visit_i32_atomic_rmw8_cmpxchg_u (atomic cmpxchg i32) - @threads I32AtomicRmw16CmpxchgU { memarg: $crate::MemArg } => visit_i32_atomic_rmw16_cmpxchg_u (atomic cmpxchg i32) - @threads I64AtomicRmw8CmpxchgU { memarg: $crate::MemArg } => visit_i64_atomic_rmw8_cmpxchg_u (atomic cmpxchg i64) - @threads I64AtomicRmw16CmpxchgU { memarg: $crate::MemArg } => visit_i64_atomic_rmw16_cmpxchg_u (atomic cmpxchg i64) - @threads I64AtomicRmw32CmpxchgU { memarg: $crate::MemArg } => visit_i64_atomic_rmw32_cmpxchg_u (atomic cmpxchg i64) + @threads { + MemoryAtomicNotify { memarg: $crate::MemArg } => visit_memory_atomic_notify (atomic rmw i32) + MemoryAtomicWait32 { memarg: $crate::MemArg } => visit_memory_atomic_wait32 (arity 3 -> 1) + MemoryAtomicWait64 { memarg: $crate::MemArg } => visit_memory_atomic_wait64 (arity 3 -> 1) + AtomicFence => visit_atomic_fence (arity 0 -> 0) + I32AtomicLoad { memarg: $crate::MemArg } => visit_i32_atomic_load (load atomic i32) + I64AtomicLoad { memarg: $crate::MemArg } => visit_i64_atomic_load (load atomic i64) + I32AtomicLoad8U { memarg: $crate::MemArg } => visit_i32_atomic_load8_u (load atomic i32) + I32AtomicLoad16U { memarg: $crate::MemArg } => visit_i32_atomic_load16_u (load atomic i32) + I64AtomicLoad8U { memarg: $crate::MemArg } => visit_i64_atomic_load8_u (load atomic i64) + I64AtomicLoad16U { memarg: $crate::MemArg } => visit_i64_atomic_load16_u (load atomic i64) + I64AtomicLoad32U { memarg: $crate::MemArg } => visit_i64_atomic_load32_u (load atomic i64) + I32AtomicStore { memarg: $crate::MemArg } => visit_i32_atomic_store (store atomic i32) + I64AtomicStore { memarg: $crate::MemArg } => visit_i64_atomic_store (store atomic i64) + I32AtomicStore8 { memarg: $crate::MemArg } => visit_i32_atomic_store8 (store atomic i32) + I32AtomicStore16 { memarg: $crate::MemArg } => visit_i32_atomic_store16 (store atomic i32) + I64AtomicStore8 { memarg: $crate::MemArg } => visit_i64_atomic_store8 (store atomic i64) + I64AtomicStore16 { memarg: $crate::MemArg } 
=> visit_i64_atomic_store16 (store atomic i64) + I64AtomicStore32 { memarg: $crate::MemArg } => visit_i64_atomic_store32 (store atomic i64) + I32AtomicRmwAdd { memarg: $crate::MemArg } => visit_i32_atomic_rmw_add (atomic rmw i32) + I64AtomicRmwAdd { memarg: $crate::MemArg } => visit_i64_atomic_rmw_add (atomic rmw i64) + I32AtomicRmw8AddU { memarg: $crate::MemArg } => visit_i32_atomic_rmw8_add_u (atomic rmw i32) + I32AtomicRmw16AddU { memarg: $crate::MemArg } => visit_i32_atomic_rmw16_add_u (atomic rmw i32) + I64AtomicRmw8AddU { memarg: $crate::MemArg } => visit_i64_atomic_rmw8_add_u (atomic rmw i64) + I64AtomicRmw16AddU { memarg: $crate::MemArg } => visit_i64_atomic_rmw16_add_u (atomic rmw i64) + I64AtomicRmw32AddU { memarg: $crate::MemArg } => visit_i64_atomic_rmw32_add_u (atomic rmw i64) + I32AtomicRmwSub { memarg: $crate::MemArg } => visit_i32_atomic_rmw_sub (atomic rmw i32) + I64AtomicRmwSub { memarg: $crate::MemArg } => visit_i64_atomic_rmw_sub (atomic rmw i64) + I32AtomicRmw8SubU { memarg: $crate::MemArg } => visit_i32_atomic_rmw8_sub_u (atomic rmw i32) + I32AtomicRmw16SubU { memarg: $crate::MemArg } => visit_i32_atomic_rmw16_sub_u (atomic rmw i32) + I64AtomicRmw8SubU { memarg: $crate::MemArg } => visit_i64_atomic_rmw8_sub_u (atomic rmw i64) + I64AtomicRmw16SubU { memarg: $crate::MemArg } => visit_i64_atomic_rmw16_sub_u (atomic rmw i64) + I64AtomicRmw32SubU { memarg: $crate::MemArg } => visit_i64_atomic_rmw32_sub_u (atomic rmw i64) + I32AtomicRmwAnd { memarg: $crate::MemArg } => visit_i32_atomic_rmw_and (atomic rmw i32) + I64AtomicRmwAnd { memarg: $crate::MemArg } => visit_i64_atomic_rmw_and (atomic rmw i64) + I32AtomicRmw8AndU { memarg: $crate::MemArg } => visit_i32_atomic_rmw8_and_u (atomic rmw i32) + I32AtomicRmw16AndU { memarg: $crate::MemArg } => visit_i32_atomic_rmw16_and_u (atomic rmw i32) + I64AtomicRmw8AndU { memarg: $crate::MemArg } => visit_i64_atomic_rmw8_and_u (atomic rmw i64) + I64AtomicRmw16AndU { memarg: $crate::MemArg } => 
visit_i64_atomic_rmw16_and_u (atomic rmw i64) + I64AtomicRmw32AndU { memarg: $crate::MemArg } => visit_i64_atomic_rmw32_and_u (atomic rmw i64) + I32AtomicRmwOr { memarg: $crate::MemArg } => visit_i32_atomic_rmw_or (atomic rmw i32) + I64AtomicRmwOr { memarg: $crate::MemArg } => visit_i64_atomic_rmw_or (atomic rmw i64) + I32AtomicRmw8OrU { memarg: $crate::MemArg } => visit_i32_atomic_rmw8_or_u (atomic rmw i32) + I32AtomicRmw16OrU { memarg: $crate::MemArg } => visit_i32_atomic_rmw16_or_u (atomic rmw i32) + I64AtomicRmw8OrU { memarg: $crate::MemArg } => visit_i64_atomic_rmw8_or_u (atomic rmw i64) + I64AtomicRmw16OrU { memarg: $crate::MemArg } => visit_i64_atomic_rmw16_or_u (atomic rmw i64) + I64AtomicRmw32OrU { memarg: $crate::MemArg } => visit_i64_atomic_rmw32_or_u (atomic rmw i64) + I32AtomicRmwXor { memarg: $crate::MemArg } => visit_i32_atomic_rmw_xor (atomic rmw i32) + I64AtomicRmwXor { memarg: $crate::MemArg } => visit_i64_atomic_rmw_xor (atomic rmw i64) + I32AtomicRmw8XorU { memarg: $crate::MemArg } => visit_i32_atomic_rmw8_xor_u (atomic rmw i32) + I32AtomicRmw16XorU { memarg: $crate::MemArg } => visit_i32_atomic_rmw16_xor_u (atomic rmw i32) + I64AtomicRmw8XorU { memarg: $crate::MemArg } => visit_i64_atomic_rmw8_xor_u (atomic rmw i64) + I64AtomicRmw16XorU { memarg: $crate::MemArg } => visit_i64_atomic_rmw16_xor_u (atomic rmw i64) + I64AtomicRmw32XorU { memarg: $crate::MemArg } => visit_i64_atomic_rmw32_xor_u (atomic rmw i64) + I32AtomicRmwXchg { memarg: $crate::MemArg } => visit_i32_atomic_rmw_xchg (atomic rmw i32) + I64AtomicRmwXchg { memarg: $crate::MemArg } => visit_i64_atomic_rmw_xchg (atomic rmw i64) + I32AtomicRmw8XchgU { memarg: $crate::MemArg } => visit_i32_atomic_rmw8_xchg_u (atomic rmw i32) + I32AtomicRmw16XchgU { memarg: $crate::MemArg } => visit_i32_atomic_rmw16_xchg_u (atomic rmw i32) + I64AtomicRmw8XchgU { memarg: $crate::MemArg } => visit_i64_atomic_rmw8_xchg_u (atomic rmw i64) + I64AtomicRmw16XchgU { memarg: $crate::MemArg } => 
visit_i64_atomic_rmw16_xchg_u (atomic rmw i64) + I64AtomicRmw32XchgU { memarg: $crate::MemArg } => visit_i64_atomic_rmw32_xchg_u (atomic rmw i64) + I32AtomicRmwCmpxchg { memarg: $crate::MemArg } => visit_i32_atomic_rmw_cmpxchg (atomic cmpxchg i32) + I64AtomicRmwCmpxchg { memarg: $crate::MemArg } => visit_i64_atomic_rmw_cmpxchg (atomic cmpxchg i64) + I32AtomicRmw8CmpxchgU { memarg: $crate::MemArg } => visit_i32_atomic_rmw8_cmpxchg_u (atomic cmpxchg i32) + I32AtomicRmw16CmpxchgU { memarg: $crate::MemArg } => visit_i32_atomic_rmw16_cmpxchg_u (atomic cmpxchg i32) + I64AtomicRmw8CmpxchgU { memarg: $crate::MemArg } => visit_i64_atomic_rmw8_cmpxchg_u (atomic cmpxchg i64) + I64AtomicRmw16CmpxchgU { memarg: $crate::MemArg } => visit_i64_atomic_rmw16_cmpxchg_u (atomic cmpxchg i64) + I64AtomicRmw32CmpxchgU { memarg: $crate::MemArg } => visit_i64_atomic_rmw32_cmpxchg_u (atomic cmpxchg i64) + } // 0xFD operators // 128-bit SIMD // - https://github.com/webassembly/simd // - https://webassembly.github.io/simd/core/binary/instructions.html - @simd V128Load { memarg: $crate::MemArg } => visit_v128_load (load v128) - @simd V128Load8x8S { memarg: $crate::MemArg } => visit_v128_load8x8_s (load v128) - @simd V128Load8x8U { memarg: $crate::MemArg } => visit_v128_load8x8_u (load v128) - @simd V128Load16x4S { memarg: $crate::MemArg } => visit_v128_load16x4_s (load v128) - @simd V128Load16x4U { memarg: $crate::MemArg } => visit_v128_load16x4_u (load v128) - @simd V128Load32x2S { memarg: $crate::MemArg } => visit_v128_load32x2_s (load v128) - @simd V128Load32x2U { memarg: $crate::MemArg } => visit_v128_load32x2_u (load v128) - @simd V128Load8Splat { memarg: $crate::MemArg } => visit_v128_load8_splat (load v128) - @simd V128Load16Splat { memarg: $crate::MemArg } => visit_v128_load16_splat (load v128) - @simd V128Load32Splat { memarg: $crate::MemArg } => visit_v128_load32_splat (load v128) - @simd V128Load64Splat { memarg: $crate::MemArg } => visit_v128_load64_splat (load v128) - @simd 
V128Load32Zero { memarg: $crate::MemArg } => visit_v128_load32_zero (load v128) - @simd V128Load64Zero { memarg: $crate::MemArg } => visit_v128_load64_zero (load v128) - @simd V128Store { memarg: $crate::MemArg } => visit_v128_store (store v128) - @simd V128Load8Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load8_lane (load lane 16) - @simd V128Load16Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load16_lane (load lane 8) - @simd V128Load32Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load32_lane (load lane 4) - @simd V128Load64Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load64_lane (load lane 2) - @simd V128Store8Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_store8_lane (store lane 16) - @simd V128Store16Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_store16_lane (store lane 8) - @simd V128Store32Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_store32_lane (store lane 4) - @simd V128Store64Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_store64_lane (store lane 2) - @simd V128Const { value: $crate::V128 } => visit_v128_const (push v128) - @simd I8x16Shuffle { lanes: [u8; 16] } => visit_i8x16_shuffle (arity 2 -> 1) - @simd I8x16ExtractLaneS { lane: u8 } => visit_i8x16_extract_lane_s (extract i32 16) - @simd I8x16ExtractLaneU { lane: u8 } => visit_i8x16_extract_lane_u (extract i32 16) - @simd I8x16ReplaceLane { lane: u8 } => visit_i8x16_replace_lane (replace i32 16) - @simd I16x8ExtractLaneS { lane: u8 } => visit_i16x8_extract_lane_s (extract i32 8) - @simd I16x8ExtractLaneU { lane: u8 } => visit_i16x8_extract_lane_u (extract i32 8) - @simd I16x8ReplaceLane { lane: u8 } => visit_i16x8_replace_lane (replace i32 8) - @simd I32x4ExtractLane { lane: u8 } => visit_i32x4_extract_lane (extract i32 4) - @simd I32x4ReplaceLane { lane: u8 } => visit_i32x4_replace_lane (replace i32 4) - @simd I64x2ExtractLane { lane: u8 } => visit_i64x2_extract_lane (extract i64 2) - @simd I64x2ReplaceLane { lane: u8 } 
=> visit_i64x2_replace_lane (replace i64 2) - @simd F32x4ExtractLane { lane: u8 } => visit_f32x4_extract_lane (extract f32 4) - @simd F32x4ReplaceLane { lane: u8 } => visit_f32x4_replace_lane (replace f32 4) - @simd F64x2ExtractLane { lane: u8 } => visit_f64x2_extract_lane (extract f64 2) - @simd F64x2ReplaceLane { lane: u8 } => visit_f64x2_replace_lane (replace f64 2) - @simd I8x16Swizzle => visit_i8x16_swizzle (binary v128) - @simd I8x16Splat => visit_i8x16_splat (splat i32) - @simd I16x8Splat => visit_i16x8_splat (splat i32) - @simd I32x4Splat => visit_i32x4_splat (splat i32) - @simd I64x2Splat => visit_i64x2_splat (splat i64) - @simd F32x4Splat => visit_f32x4_splat (splat f32) - @simd F64x2Splat => visit_f64x2_splat (splat f64) - @simd I8x16Eq => visit_i8x16_eq (binary v128) - @simd I8x16Ne => visit_i8x16_ne (binary v128) - @simd I8x16LtS => visit_i8x16_lt_s (binary v128) - @simd I8x16LtU => visit_i8x16_lt_u (binary v128) - @simd I8x16GtS => visit_i8x16_gt_s (binary v128) - @simd I8x16GtU => visit_i8x16_gt_u (binary v128) - @simd I8x16LeS => visit_i8x16_le_s (binary v128) - @simd I8x16LeU => visit_i8x16_le_u (binary v128) - @simd I8x16GeS => visit_i8x16_ge_s (binary v128) - @simd I8x16GeU => visit_i8x16_ge_u (binary v128) - @simd I16x8Eq => visit_i16x8_eq (binary v128) - @simd I16x8Ne => visit_i16x8_ne (binary v128) - @simd I16x8LtS => visit_i16x8_lt_s (binary v128) - @simd I16x8LtU => visit_i16x8_lt_u (binary v128) - @simd I16x8GtS => visit_i16x8_gt_s (binary v128) - @simd I16x8GtU => visit_i16x8_gt_u (binary v128) - @simd I16x8LeS => visit_i16x8_le_s (binary v128) - @simd I16x8LeU => visit_i16x8_le_u (binary v128) - @simd I16x8GeS => visit_i16x8_ge_s (binary v128) - @simd I16x8GeU => visit_i16x8_ge_u (binary v128) - @simd I32x4Eq => visit_i32x4_eq (binary v128) - @simd I32x4Ne => visit_i32x4_ne (binary v128) - @simd I32x4LtS => visit_i32x4_lt_s (binary v128) - @simd I32x4LtU => visit_i32x4_lt_u (binary v128) - @simd I32x4GtS => visit_i32x4_gt_s (binary v128) 
- @simd I32x4GtU => visit_i32x4_gt_u (binary v128) - @simd I32x4LeS => visit_i32x4_le_s (binary v128) - @simd I32x4LeU => visit_i32x4_le_u (binary v128) - @simd I32x4GeS => visit_i32x4_ge_s (binary v128) - @simd I32x4GeU => visit_i32x4_ge_u (binary v128) - @simd I64x2Eq => visit_i64x2_eq (binary v128) - @simd I64x2Ne => visit_i64x2_ne (binary v128) - @simd I64x2LtS => visit_i64x2_lt_s (binary v128) - @simd I64x2GtS => visit_i64x2_gt_s (binary v128) - @simd I64x2LeS => visit_i64x2_le_s (binary v128) - @simd I64x2GeS => visit_i64x2_ge_s (binary v128) - @simd F32x4Eq => visit_f32x4_eq (binary v128f) - @simd F32x4Ne => visit_f32x4_ne (binary v128f) - @simd F32x4Lt => visit_f32x4_lt (binary v128f) - @simd F32x4Gt => visit_f32x4_gt (binary v128f) - @simd F32x4Le => visit_f32x4_le (binary v128f) - @simd F32x4Ge => visit_f32x4_ge (binary v128f) - @simd F64x2Eq => visit_f64x2_eq (binary v128f) - @simd F64x2Ne => visit_f64x2_ne (binary v128f) - @simd F64x2Lt => visit_f64x2_lt (binary v128f) - @simd F64x2Gt => visit_f64x2_gt (binary v128f) - @simd F64x2Le => visit_f64x2_le (binary v128f) - @simd F64x2Ge => visit_f64x2_ge (binary v128f) - @simd V128Not => visit_v128_not (unary v128) - @simd V128And => visit_v128_and (binary v128) - @simd V128AndNot => visit_v128_andnot (binary v128) - @simd V128Or => visit_v128_or (binary v128) - @simd V128Xor => visit_v128_xor (binary v128) - @simd V128Bitselect => visit_v128_bitselect (ternary v128) - @simd V128AnyTrue => visit_v128_any_true (test v128) - @simd I8x16Abs => visit_i8x16_abs (unary v128) - @simd I8x16Neg => visit_i8x16_neg (unary v128) - @simd I8x16Popcnt => visit_i8x16_popcnt (unary v128) - @simd I8x16AllTrue => visit_i8x16_all_true (test v128) - @simd I8x16Bitmask => visit_i8x16_bitmask (test v128) - @simd I8x16NarrowI16x8S => visit_i8x16_narrow_i16x8_s (binary v128) - @simd I8x16NarrowI16x8U => visit_i8x16_narrow_i16x8_u (binary v128) - @simd I8x16Shl => visit_i8x16_shl (shift v128) - @simd I8x16ShrS => visit_i8x16_shr_s 
(shift v128) - @simd I8x16ShrU => visit_i8x16_shr_u (shift v128) - @simd I8x16Add => visit_i8x16_add (binary v128) - @simd I8x16AddSatS => visit_i8x16_add_sat_s (binary v128) - @simd I8x16AddSatU => visit_i8x16_add_sat_u (binary v128) - @simd I8x16Sub => visit_i8x16_sub (binary v128) - @simd I8x16SubSatS => visit_i8x16_sub_sat_s (binary v128) - @simd I8x16SubSatU => visit_i8x16_sub_sat_u (binary v128) - @simd I8x16MinS => visit_i8x16_min_s (binary v128) - @simd I8x16MinU => visit_i8x16_min_u (binary v128) - @simd I8x16MaxS => visit_i8x16_max_s (binary v128) - @simd I8x16MaxU => visit_i8x16_max_u (binary v128) - @simd I8x16AvgrU => visit_i8x16_avgr_u (binary v128) - @simd I16x8ExtAddPairwiseI8x16S => visit_i16x8_extadd_pairwise_i8x16_s (unary v128) - @simd I16x8ExtAddPairwiseI8x16U => visit_i16x8_extadd_pairwise_i8x16_u (unary v128) - @simd I16x8Abs => visit_i16x8_abs (unary v128) - @simd I16x8Neg => visit_i16x8_neg (unary v128) - @simd I16x8Q15MulrSatS => visit_i16x8_q15mulr_sat_s (binary v128) - @simd I16x8AllTrue => visit_i16x8_all_true (test v128) - @simd I16x8Bitmask => visit_i16x8_bitmask (test v128) - @simd I16x8NarrowI32x4S => visit_i16x8_narrow_i32x4_s (binary v128) - @simd I16x8NarrowI32x4U => visit_i16x8_narrow_i32x4_u (binary v128) - @simd I16x8ExtendLowI8x16S => visit_i16x8_extend_low_i8x16_s (unary v128) - @simd I16x8ExtendHighI8x16S => visit_i16x8_extend_high_i8x16_s (unary v128) - @simd I16x8ExtendLowI8x16U => visit_i16x8_extend_low_i8x16_u (unary v128) - @simd I16x8ExtendHighI8x16U => visit_i16x8_extend_high_i8x16_u (unary v128) - @simd I16x8Shl => visit_i16x8_shl (shift v128) - @simd I16x8ShrS => visit_i16x8_shr_s (shift v128) - @simd I16x8ShrU => visit_i16x8_shr_u (shift v128) - @simd I16x8Add => visit_i16x8_add (binary v128) - @simd I16x8AddSatS => visit_i16x8_add_sat_s (binary v128) - @simd I16x8AddSatU => visit_i16x8_add_sat_u (binary v128) - @simd I16x8Sub => visit_i16x8_sub (binary v128) - @simd I16x8SubSatS => visit_i16x8_sub_sat_s (binary 
v128) - @simd I16x8SubSatU => visit_i16x8_sub_sat_u (binary v128) - @simd I16x8Mul => visit_i16x8_mul (binary v128) - @simd I16x8MinS => visit_i16x8_min_s (binary v128) - @simd I16x8MinU => visit_i16x8_min_u (binary v128) - @simd I16x8MaxS => visit_i16x8_max_s (binary v128) - @simd I16x8MaxU => visit_i16x8_max_u (binary v128) - @simd I16x8AvgrU => visit_i16x8_avgr_u (binary v128) - @simd I16x8ExtMulLowI8x16S => visit_i16x8_extmul_low_i8x16_s (binary v128) - @simd I16x8ExtMulHighI8x16S => visit_i16x8_extmul_high_i8x16_s (binary v128) - @simd I16x8ExtMulLowI8x16U => visit_i16x8_extmul_low_i8x16_u (binary v128) - @simd I16x8ExtMulHighI8x16U => visit_i16x8_extmul_high_i8x16_u (binary v128) - @simd I32x4ExtAddPairwiseI16x8S => visit_i32x4_extadd_pairwise_i16x8_s (unary v128) - @simd I32x4ExtAddPairwiseI16x8U => visit_i32x4_extadd_pairwise_i16x8_u (unary v128) - @simd I32x4Abs => visit_i32x4_abs (unary v128) - @simd I32x4Neg => visit_i32x4_neg (unary v128) - @simd I32x4AllTrue => visit_i32x4_all_true (test v128) - @simd I32x4Bitmask => visit_i32x4_bitmask (test v128) - @simd I32x4ExtendLowI16x8S => visit_i32x4_extend_low_i16x8_s (unary v128) - @simd I32x4ExtendHighI16x8S => visit_i32x4_extend_high_i16x8_s (unary v128) - @simd I32x4ExtendLowI16x8U => visit_i32x4_extend_low_i16x8_u (unary v128) - @simd I32x4ExtendHighI16x8U => visit_i32x4_extend_high_i16x8_u (unary v128) - @simd I32x4Shl => visit_i32x4_shl (shift v128) - @simd I32x4ShrS => visit_i32x4_shr_s (shift v128) - @simd I32x4ShrU => visit_i32x4_shr_u (shift v128) - @simd I32x4Add => visit_i32x4_add (binary v128) - @simd I32x4Sub => visit_i32x4_sub (binary v128) - @simd I32x4Mul => visit_i32x4_mul (binary v128) - @simd I32x4MinS => visit_i32x4_min_s (binary v128) - @simd I32x4MinU => visit_i32x4_min_u (binary v128) - @simd I32x4MaxS => visit_i32x4_max_s (binary v128) - @simd I32x4MaxU => visit_i32x4_max_u (binary v128) - @simd I32x4DotI16x8S => visit_i32x4_dot_i16x8_s (binary v128) - @simd I32x4ExtMulLowI16x8S => 
visit_i32x4_extmul_low_i16x8_s (binary v128) - @simd I32x4ExtMulHighI16x8S => visit_i32x4_extmul_high_i16x8_s (binary v128) - @simd I32x4ExtMulLowI16x8U => visit_i32x4_extmul_low_i16x8_u (binary v128) - @simd I32x4ExtMulHighI16x8U => visit_i32x4_extmul_high_i16x8_u (binary v128) - @simd I64x2Abs => visit_i64x2_abs (unary v128) - @simd I64x2Neg => visit_i64x2_neg (unary v128) - @simd I64x2AllTrue => visit_i64x2_all_true (test v128) - @simd I64x2Bitmask => visit_i64x2_bitmask (test v128) - @simd I64x2ExtendLowI32x4S => visit_i64x2_extend_low_i32x4_s (unary v128) - @simd I64x2ExtendHighI32x4S => visit_i64x2_extend_high_i32x4_s (unary v128) - @simd I64x2ExtendLowI32x4U => visit_i64x2_extend_low_i32x4_u (unary v128) - @simd I64x2ExtendHighI32x4U => visit_i64x2_extend_high_i32x4_u (unary v128) - @simd I64x2Shl => visit_i64x2_shl (shift v128) - @simd I64x2ShrS => visit_i64x2_shr_s (shift v128) - @simd I64x2ShrU => visit_i64x2_shr_u (shift v128) - @simd I64x2Add => visit_i64x2_add (binary v128) - @simd I64x2Sub => visit_i64x2_sub (binary v128) - @simd I64x2Mul => visit_i64x2_mul (binary v128) - @simd I64x2ExtMulLowI32x4S => visit_i64x2_extmul_low_i32x4_s (binary v128) - @simd I64x2ExtMulHighI32x4S => visit_i64x2_extmul_high_i32x4_s (binary v128) - @simd I64x2ExtMulLowI32x4U => visit_i64x2_extmul_low_i32x4_u (binary v128) - @simd I64x2ExtMulHighI32x4U => visit_i64x2_extmul_high_i32x4_u (binary v128) - @simd F32x4Ceil => visit_f32x4_ceil (unary v128f) - @simd F32x4Floor => visit_f32x4_floor (unary v128f) - @simd F32x4Trunc => visit_f32x4_trunc (unary v128f) - @simd F32x4Nearest => visit_f32x4_nearest (unary v128f) - @simd F32x4Abs => visit_f32x4_abs (unary v128f) - @simd F32x4Neg => visit_f32x4_neg (unary v128f) - @simd F32x4Sqrt => visit_f32x4_sqrt (unary v128f) - @simd F32x4Add => visit_f32x4_add (binary v128f) - @simd F32x4Sub => visit_f32x4_sub (binary v128f) - @simd F32x4Mul => visit_f32x4_mul (binary v128f) - @simd F32x4Div => visit_f32x4_div (binary v128f) - @simd 
F32x4Min => visit_f32x4_min (binary v128f) - @simd F32x4Max => visit_f32x4_max (binary v128f) - @simd F32x4PMin => visit_f32x4_pmin (binary v128f) - @simd F32x4PMax => visit_f32x4_pmax (binary v128f) - @simd F64x2Ceil => visit_f64x2_ceil (unary v128f) - @simd F64x2Floor => visit_f64x2_floor (unary v128f) - @simd F64x2Trunc => visit_f64x2_trunc (unary v128f) - @simd F64x2Nearest => visit_f64x2_nearest (unary v128f) - @simd F64x2Abs => visit_f64x2_abs (unary v128f) - @simd F64x2Neg => visit_f64x2_neg (unary v128f) - @simd F64x2Sqrt => visit_f64x2_sqrt (unary v128f) - @simd F64x2Add => visit_f64x2_add (binary v128f) - @simd F64x2Sub => visit_f64x2_sub (binary v128f) - @simd F64x2Mul => visit_f64x2_mul (binary v128f) - @simd F64x2Div => visit_f64x2_div (binary v128f) - @simd F64x2Min => visit_f64x2_min (binary v128f) - @simd F64x2Max => visit_f64x2_max (binary v128f) - @simd F64x2PMin => visit_f64x2_pmin (binary v128f) - @simd F64x2PMax => visit_f64x2_pmax (binary v128f) - @simd I32x4TruncSatF32x4S => visit_i32x4_trunc_sat_f32x4_s (unary v128f) - @simd I32x4TruncSatF32x4U => visit_i32x4_trunc_sat_f32x4_u (unary v128f) - @simd F32x4ConvertI32x4S => visit_f32x4_convert_i32x4_s (unary v128f) - @simd F32x4ConvertI32x4U => visit_f32x4_convert_i32x4_u (unary v128f) - @simd I32x4TruncSatF64x2SZero => visit_i32x4_trunc_sat_f64x2_s_zero (unary v128f) - @simd I32x4TruncSatF64x2UZero => visit_i32x4_trunc_sat_f64x2_u_zero (unary v128f) - @simd F64x2ConvertLowI32x4S => visit_f64x2_convert_low_i32x4_s (unary v128f) - @simd F64x2ConvertLowI32x4U => visit_f64x2_convert_low_i32x4_u (unary v128f) - @simd F32x4DemoteF64x2Zero => visit_f32x4_demote_f64x2_zero (unary v128f) - @simd F64x2PromoteLowF32x4 => visit_f64x2_promote_low_f32x4 (unary v128f) + @simd { + V128Load { memarg: $crate::MemArg } => visit_v128_load (load v128) + V128Load8x8S { memarg: $crate::MemArg } => visit_v128_load8x8_s (load v128) + V128Load8x8U { memarg: $crate::MemArg } => visit_v128_load8x8_u (load v128) + 
V128Load16x4S { memarg: $crate::MemArg } => visit_v128_load16x4_s (load v128) + V128Load16x4U { memarg: $crate::MemArg } => visit_v128_load16x4_u (load v128) + V128Load32x2S { memarg: $crate::MemArg } => visit_v128_load32x2_s (load v128) + V128Load32x2U { memarg: $crate::MemArg } => visit_v128_load32x2_u (load v128) + V128Load8Splat { memarg: $crate::MemArg } => visit_v128_load8_splat (load v128) + V128Load16Splat { memarg: $crate::MemArg } => visit_v128_load16_splat (load v128) + V128Load32Splat { memarg: $crate::MemArg } => visit_v128_load32_splat (load v128) + V128Load64Splat { memarg: $crate::MemArg } => visit_v128_load64_splat (load v128) + V128Load32Zero { memarg: $crate::MemArg } => visit_v128_load32_zero (load v128) + V128Load64Zero { memarg: $crate::MemArg } => visit_v128_load64_zero (load v128) + V128Store { memarg: $crate::MemArg } => visit_v128_store (store v128) + V128Load8Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load8_lane (load lane 16) + V128Load16Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load16_lane (load lane 8) + V128Load32Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load32_lane (load lane 4) + V128Load64Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_load64_lane (load lane 2) + V128Store8Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_store8_lane (store lane 16) + V128Store16Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_store16_lane (store lane 8) + V128Store32Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_store32_lane (store lane 4) + V128Store64Lane { memarg: $crate::MemArg, lane: u8 } => visit_v128_store64_lane (store lane 2) + V128Const { value: $crate::V128 } => visit_v128_const (push v128) + I8x16Shuffle { lanes: [u8; 16] } => visit_i8x16_shuffle (arity 2 -> 1) + I8x16ExtractLaneS { lane: u8 } => visit_i8x16_extract_lane_s (extract i32 16) + I8x16ExtractLaneU { lane: u8 } => visit_i8x16_extract_lane_u (extract i32 16) + I8x16ReplaceLane { lane: u8 } => 
visit_i8x16_replace_lane (replace i32 16) + I16x8ExtractLaneS { lane: u8 } => visit_i16x8_extract_lane_s (extract i32 8) + I16x8ExtractLaneU { lane: u8 } => visit_i16x8_extract_lane_u (extract i32 8) + I16x8ReplaceLane { lane: u8 } => visit_i16x8_replace_lane (replace i32 8) + I32x4ExtractLane { lane: u8 } => visit_i32x4_extract_lane (extract i32 4) + I32x4ReplaceLane { lane: u8 } => visit_i32x4_replace_lane (replace i32 4) + I64x2ExtractLane { lane: u8 } => visit_i64x2_extract_lane (extract i64 2) + I64x2ReplaceLane { lane: u8 } => visit_i64x2_replace_lane (replace i64 2) + F32x4ExtractLane { lane: u8 } => visit_f32x4_extract_lane (extract f32 4) + F32x4ReplaceLane { lane: u8 } => visit_f32x4_replace_lane (replace f32 4) + F64x2ExtractLane { lane: u8 } => visit_f64x2_extract_lane (extract f64 2) + F64x2ReplaceLane { lane: u8 } => visit_f64x2_replace_lane (replace f64 2) + I8x16Swizzle => visit_i8x16_swizzle (binary v128) + I8x16Splat => visit_i8x16_splat (splat i32) + I16x8Splat => visit_i16x8_splat (splat i32) + I32x4Splat => visit_i32x4_splat (splat i32) + I64x2Splat => visit_i64x2_splat (splat i64) + F32x4Splat => visit_f32x4_splat (splat f32) + F64x2Splat => visit_f64x2_splat (splat f64) + I8x16Eq => visit_i8x16_eq (binary v128) + I8x16Ne => visit_i8x16_ne (binary v128) + I8x16LtS => visit_i8x16_lt_s (binary v128) + I8x16LtU => visit_i8x16_lt_u (binary v128) + I8x16GtS => visit_i8x16_gt_s (binary v128) + I8x16GtU => visit_i8x16_gt_u (binary v128) + I8x16LeS => visit_i8x16_le_s (binary v128) + I8x16LeU => visit_i8x16_le_u (binary v128) + I8x16GeS => visit_i8x16_ge_s (binary v128) + I8x16GeU => visit_i8x16_ge_u (binary v128) + I16x8Eq => visit_i16x8_eq (binary v128) + I16x8Ne => visit_i16x8_ne (binary v128) + I16x8LtS => visit_i16x8_lt_s (binary v128) + I16x8LtU => visit_i16x8_lt_u (binary v128) + I16x8GtS => visit_i16x8_gt_s (binary v128) + I16x8GtU => visit_i16x8_gt_u (binary v128) + I16x8LeS => visit_i16x8_le_s (binary v128) + I16x8LeU => visit_i16x8_le_u 
(binary v128) + I16x8GeS => visit_i16x8_ge_s (binary v128) + I16x8GeU => visit_i16x8_ge_u (binary v128) + I32x4Eq => visit_i32x4_eq (binary v128) + I32x4Ne => visit_i32x4_ne (binary v128) + I32x4LtS => visit_i32x4_lt_s (binary v128) + I32x4LtU => visit_i32x4_lt_u (binary v128) + I32x4GtS => visit_i32x4_gt_s (binary v128) + I32x4GtU => visit_i32x4_gt_u (binary v128) + I32x4LeS => visit_i32x4_le_s (binary v128) + I32x4LeU => visit_i32x4_le_u (binary v128) + I32x4GeS => visit_i32x4_ge_s (binary v128) + I32x4GeU => visit_i32x4_ge_u (binary v128) + I64x2Eq => visit_i64x2_eq (binary v128) + I64x2Ne => visit_i64x2_ne (binary v128) + I64x2LtS => visit_i64x2_lt_s (binary v128) + I64x2GtS => visit_i64x2_gt_s (binary v128) + I64x2LeS => visit_i64x2_le_s (binary v128) + I64x2GeS => visit_i64x2_ge_s (binary v128) + F32x4Eq => visit_f32x4_eq (binary v128f) + F32x4Ne => visit_f32x4_ne (binary v128f) + F32x4Lt => visit_f32x4_lt (binary v128f) + F32x4Gt => visit_f32x4_gt (binary v128f) + F32x4Le => visit_f32x4_le (binary v128f) + F32x4Ge => visit_f32x4_ge (binary v128f) + F64x2Eq => visit_f64x2_eq (binary v128f) + F64x2Ne => visit_f64x2_ne (binary v128f) + F64x2Lt => visit_f64x2_lt (binary v128f) + F64x2Gt => visit_f64x2_gt (binary v128f) + F64x2Le => visit_f64x2_le (binary v128f) + F64x2Ge => visit_f64x2_ge (binary v128f) + V128Not => visit_v128_not (unary v128) + V128And => visit_v128_and (binary v128) + V128AndNot => visit_v128_andnot (binary v128) + V128Or => visit_v128_or (binary v128) + V128Xor => visit_v128_xor (binary v128) + V128Bitselect => visit_v128_bitselect (ternary v128) + V128AnyTrue => visit_v128_any_true (test v128) + I8x16Abs => visit_i8x16_abs (unary v128) + I8x16Neg => visit_i8x16_neg (unary v128) + I8x16Popcnt => visit_i8x16_popcnt (unary v128) + I8x16AllTrue => visit_i8x16_all_true (test v128) + I8x16Bitmask => visit_i8x16_bitmask (test v128) + I8x16NarrowI16x8S => visit_i8x16_narrow_i16x8_s (binary v128) + I8x16NarrowI16x8U => visit_i8x16_narrow_i16x8_u 
(binary v128) + I8x16Shl => visit_i8x16_shl (shift v128) + I8x16ShrS => visit_i8x16_shr_s (shift v128) + I8x16ShrU => visit_i8x16_shr_u (shift v128) + I8x16Add => visit_i8x16_add (binary v128) + I8x16AddSatS => visit_i8x16_add_sat_s (binary v128) + I8x16AddSatU => visit_i8x16_add_sat_u (binary v128) + I8x16Sub => visit_i8x16_sub (binary v128) + I8x16SubSatS => visit_i8x16_sub_sat_s (binary v128) + I8x16SubSatU => visit_i8x16_sub_sat_u (binary v128) + I8x16MinS => visit_i8x16_min_s (binary v128) + I8x16MinU => visit_i8x16_min_u (binary v128) + I8x16MaxS => visit_i8x16_max_s (binary v128) + I8x16MaxU => visit_i8x16_max_u (binary v128) + I8x16AvgrU => visit_i8x16_avgr_u (binary v128) + I16x8ExtAddPairwiseI8x16S => visit_i16x8_extadd_pairwise_i8x16_s (unary v128) + I16x8ExtAddPairwiseI8x16U => visit_i16x8_extadd_pairwise_i8x16_u (unary v128) + I16x8Abs => visit_i16x8_abs (unary v128) + I16x8Neg => visit_i16x8_neg (unary v128) + I16x8Q15MulrSatS => visit_i16x8_q15mulr_sat_s (binary v128) + I16x8AllTrue => visit_i16x8_all_true (test v128) + I16x8Bitmask => visit_i16x8_bitmask (test v128) + I16x8NarrowI32x4S => visit_i16x8_narrow_i32x4_s (binary v128) + I16x8NarrowI32x4U => visit_i16x8_narrow_i32x4_u (binary v128) + I16x8ExtendLowI8x16S => visit_i16x8_extend_low_i8x16_s (unary v128) + I16x8ExtendHighI8x16S => visit_i16x8_extend_high_i8x16_s (unary v128) + I16x8ExtendLowI8x16U => visit_i16x8_extend_low_i8x16_u (unary v128) + I16x8ExtendHighI8x16U => visit_i16x8_extend_high_i8x16_u (unary v128) + I16x8Shl => visit_i16x8_shl (shift v128) + I16x8ShrS => visit_i16x8_shr_s (shift v128) + I16x8ShrU => visit_i16x8_shr_u (shift v128) + I16x8Add => visit_i16x8_add (binary v128) + I16x8AddSatS => visit_i16x8_add_sat_s (binary v128) + I16x8AddSatU => visit_i16x8_add_sat_u (binary v128) + I16x8Sub => visit_i16x8_sub (binary v128) + I16x8SubSatS => visit_i16x8_sub_sat_s (binary v128) + I16x8SubSatU => visit_i16x8_sub_sat_u (binary v128) + I16x8Mul => visit_i16x8_mul (binary v128) + 
I16x8MinS => visit_i16x8_min_s (binary v128) + I16x8MinU => visit_i16x8_min_u (binary v128) + I16x8MaxS => visit_i16x8_max_s (binary v128) + I16x8MaxU => visit_i16x8_max_u (binary v128) + I16x8AvgrU => visit_i16x8_avgr_u (binary v128) + I16x8ExtMulLowI8x16S => visit_i16x8_extmul_low_i8x16_s (binary v128) + I16x8ExtMulHighI8x16S => visit_i16x8_extmul_high_i8x16_s (binary v128) + I16x8ExtMulLowI8x16U => visit_i16x8_extmul_low_i8x16_u (binary v128) + I16x8ExtMulHighI8x16U => visit_i16x8_extmul_high_i8x16_u (binary v128) + I32x4ExtAddPairwiseI16x8S => visit_i32x4_extadd_pairwise_i16x8_s (unary v128) + I32x4ExtAddPairwiseI16x8U => visit_i32x4_extadd_pairwise_i16x8_u (unary v128) + I32x4Abs => visit_i32x4_abs (unary v128) + I32x4Neg => visit_i32x4_neg (unary v128) + I32x4AllTrue => visit_i32x4_all_true (test v128) + I32x4Bitmask => visit_i32x4_bitmask (test v128) + I32x4ExtendLowI16x8S => visit_i32x4_extend_low_i16x8_s (unary v128) + I32x4ExtendHighI16x8S => visit_i32x4_extend_high_i16x8_s (unary v128) + I32x4ExtendLowI16x8U => visit_i32x4_extend_low_i16x8_u (unary v128) + I32x4ExtendHighI16x8U => visit_i32x4_extend_high_i16x8_u (unary v128) + I32x4Shl => visit_i32x4_shl (shift v128) + I32x4ShrS => visit_i32x4_shr_s (shift v128) + I32x4ShrU => visit_i32x4_shr_u (shift v128) + I32x4Add => visit_i32x4_add (binary v128) + I32x4Sub => visit_i32x4_sub (binary v128) + I32x4Mul => visit_i32x4_mul (binary v128) + I32x4MinS => visit_i32x4_min_s (binary v128) + I32x4MinU => visit_i32x4_min_u (binary v128) + I32x4MaxS => visit_i32x4_max_s (binary v128) + I32x4MaxU => visit_i32x4_max_u (binary v128) + I32x4DotI16x8S => visit_i32x4_dot_i16x8_s (binary v128) + I32x4ExtMulLowI16x8S => visit_i32x4_extmul_low_i16x8_s (binary v128) + I32x4ExtMulHighI16x8S => visit_i32x4_extmul_high_i16x8_s (binary v128) + I32x4ExtMulLowI16x8U => visit_i32x4_extmul_low_i16x8_u (binary v128) + I32x4ExtMulHighI16x8U => visit_i32x4_extmul_high_i16x8_u (binary v128) + I64x2Abs => visit_i64x2_abs (unary v128) + 
I64x2Neg => visit_i64x2_neg (unary v128) + I64x2AllTrue => visit_i64x2_all_true (test v128) + I64x2Bitmask => visit_i64x2_bitmask (test v128) + I64x2ExtendLowI32x4S => visit_i64x2_extend_low_i32x4_s (unary v128) + I64x2ExtendHighI32x4S => visit_i64x2_extend_high_i32x4_s (unary v128) + I64x2ExtendLowI32x4U => visit_i64x2_extend_low_i32x4_u (unary v128) + I64x2ExtendHighI32x4U => visit_i64x2_extend_high_i32x4_u (unary v128) + I64x2Shl => visit_i64x2_shl (shift v128) + I64x2ShrS => visit_i64x2_shr_s (shift v128) + I64x2ShrU => visit_i64x2_shr_u (shift v128) + I64x2Add => visit_i64x2_add (binary v128) + I64x2Sub => visit_i64x2_sub (binary v128) + I64x2Mul => visit_i64x2_mul (binary v128) + I64x2ExtMulLowI32x4S => visit_i64x2_extmul_low_i32x4_s (binary v128) + I64x2ExtMulHighI32x4S => visit_i64x2_extmul_high_i32x4_s (binary v128) + I64x2ExtMulLowI32x4U => visit_i64x2_extmul_low_i32x4_u (binary v128) + I64x2ExtMulHighI32x4U => visit_i64x2_extmul_high_i32x4_u (binary v128) + F32x4Ceil => visit_f32x4_ceil (unary v128f) + F32x4Floor => visit_f32x4_floor (unary v128f) + F32x4Trunc => visit_f32x4_trunc (unary v128f) + F32x4Nearest => visit_f32x4_nearest (unary v128f) + F32x4Abs => visit_f32x4_abs (unary v128f) + F32x4Neg => visit_f32x4_neg (unary v128f) + F32x4Sqrt => visit_f32x4_sqrt (unary v128f) + F32x4Add => visit_f32x4_add (binary v128f) + F32x4Sub => visit_f32x4_sub (binary v128f) + F32x4Mul => visit_f32x4_mul (binary v128f) + F32x4Div => visit_f32x4_div (binary v128f) + F32x4Min => visit_f32x4_min (binary v128f) + F32x4Max => visit_f32x4_max (binary v128f) + F32x4PMin => visit_f32x4_pmin (binary v128f) + F32x4PMax => visit_f32x4_pmax (binary v128f) + F64x2Ceil => visit_f64x2_ceil (unary v128f) + F64x2Floor => visit_f64x2_floor (unary v128f) + F64x2Trunc => visit_f64x2_trunc (unary v128f) + F64x2Nearest => visit_f64x2_nearest (unary v128f) + F64x2Abs => visit_f64x2_abs (unary v128f) + F64x2Neg => visit_f64x2_neg (unary v128f) + F64x2Sqrt => visit_f64x2_sqrt (unary 
v128f) + F64x2Add => visit_f64x2_add (binary v128f) + F64x2Sub => visit_f64x2_sub (binary v128f) + F64x2Mul => visit_f64x2_mul (binary v128f) + F64x2Div => visit_f64x2_div (binary v128f) + F64x2Min => visit_f64x2_min (binary v128f) + F64x2Max => visit_f64x2_max (binary v128f) + F64x2PMin => visit_f64x2_pmin (binary v128f) + F64x2PMax => visit_f64x2_pmax (binary v128f) + I32x4TruncSatF32x4S => visit_i32x4_trunc_sat_f32x4_s (unary v128f) + I32x4TruncSatF32x4U => visit_i32x4_trunc_sat_f32x4_u (unary v128f) + F32x4ConvertI32x4S => visit_f32x4_convert_i32x4_s (unary v128f) + F32x4ConvertI32x4U => visit_f32x4_convert_i32x4_u (unary v128f) + I32x4TruncSatF64x2SZero => visit_i32x4_trunc_sat_f64x2_s_zero (unary v128f) + I32x4TruncSatF64x2UZero => visit_i32x4_trunc_sat_f64x2_u_zero (unary v128f) + F64x2ConvertLowI32x4S => visit_f64x2_convert_low_i32x4_s (unary v128f) + F64x2ConvertLowI32x4U => visit_f64x2_convert_low_i32x4_u (unary v128f) + F32x4DemoteF64x2Zero => visit_f32x4_demote_f64x2_zero (unary v128f) + F64x2PromoteLowF32x4 => visit_f64x2_promote_low_f32x4 (unary v128f) + } // Relaxed SIMD operators // https://github.com/WebAssembly/relaxed-simd - @relaxed_simd I8x16RelaxedSwizzle => visit_i8x16_relaxed_swizzle (binary v128) - @relaxed_simd I32x4RelaxedTruncF32x4S => visit_i32x4_relaxed_trunc_f32x4_s (unary v128) - @relaxed_simd I32x4RelaxedTruncF32x4U => visit_i32x4_relaxed_trunc_f32x4_u (unary v128) - @relaxed_simd I32x4RelaxedTruncF64x2SZero => visit_i32x4_relaxed_trunc_f64x2_s_zero (unary v128) - @relaxed_simd I32x4RelaxedTruncF64x2UZero => visit_i32x4_relaxed_trunc_f64x2_u_zero (unary v128) - @relaxed_simd F32x4RelaxedMadd => visit_f32x4_relaxed_madd (ternary v128) - @relaxed_simd F32x4RelaxedNmadd => visit_f32x4_relaxed_nmadd (ternary v128) - @relaxed_simd F64x2RelaxedMadd => visit_f64x2_relaxed_madd (ternary v128) - @relaxed_simd F64x2RelaxedNmadd => visit_f64x2_relaxed_nmadd (ternary v128) - @relaxed_simd I8x16RelaxedLaneselect => visit_i8x16_relaxed_laneselect 
(ternary v128) - @relaxed_simd I16x8RelaxedLaneselect => visit_i16x8_relaxed_laneselect (ternary v128) - @relaxed_simd I32x4RelaxedLaneselect => visit_i32x4_relaxed_laneselect (ternary v128) - @relaxed_simd I64x2RelaxedLaneselect => visit_i64x2_relaxed_laneselect (ternary v128) - @relaxed_simd F32x4RelaxedMin => visit_f32x4_relaxed_min (binary v128) - @relaxed_simd F32x4RelaxedMax => visit_f32x4_relaxed_max (binary v128) - @relaxed_simd F64x2RelaxedMin => visit_f64x2_relaxed_min (binary v128) - @relaxed_simd F64x2RelaxedMax => visit_f64x2_relaxed_max (binary v128) - @relaxed_simd I16x8RelaxedQ15mulrS => visit_i16x8_relaxed_q15mulr_s (binary v128) - @relaxed_simd I16x8RelaxedDotI8x16I7x16S => visit_i16x8_relaxed_dot_i8x16_i7x16_s (binary v128) - @relaxed_simd I32x4RelaxedDotI8x16I7x16AddS => visit_i32x4_relaxed_dot_i8x16_i7x16_add_s (ternary v128) + @relaxed_simd { + I8x16RelaxedSwizzle => visit_i8x16_relaxed_swizzle (binary v128) + I32x4RelaxedTruncF32x4S => visit_i32x4_relaxed_trunc_f32x4_s (unary v128) + I32x4RelaxedTruncF32x4U => visit_i32x4_relaxed_trunc_f32x4_u (unary v128) + I32x4RelaxedTruncF64x2SZero => visit_i32x4_relaxed_trunc_f64x2_s_zero (unary v128) + I32x4RelaxedTruncF64x2UZero => visit_i32x4_relaxed_trunc_f64x2_u_zero (unary v128) + F32x4RelaxedMadd => visit_f32x4_relaxed_madd (ternary v128) + F32x4RelaxedNmadd => visit_f32x4_relaxed_nmadd (ternary v128) + F64x2RelaxedMadd => visit_f64x2_relaxed_madd (ternary v128) + F64x2RelaxedNmadd => visit_f64x2_relaxed_nmadd (ternary v128) + I8x16RelaxedLaneselect => visit_i8x16_relaxed_laneselect (ternary v128) + I16x8RelaxedLaneselect => visit_i16x8_relaxed_laneselect (ternary v128) + I32x4RelaxedLaneselect => visit_i32x4_relaxed_laneselect (ternary v128) + I64x2RelaxedLaneselect => visit_i64x2_relaxed_laneselect (ternary v128) + F32x4RelaxedMin => visit_f32x4_relaxed_min (binary v128) + F32x4RelaxedMax => visit_f32x4_relaxed_max (binary v128) + F64x2RelaxedMin => visit_f64x2_relaxed_min (binary v128) + 
F64x2RelaxedMax => visit_f64x2_relaxed_max (binary v128) + I16x8RelaxedQ15mulrS => visit_i16x8_relaxed_q15mulr_s (binary v128) + I16x8RelaxedDotI8x16I7x16S => visit_i16x8_relaxed_dot_i8x16_i7x16_s (binary v128) + I32x4RelaxedDotI8x16I7x16AddS => visit_i32x4_relaxed_dot_i8x16_i7x16_add_s (ternary v128) + } - @exceptions TryTable { try_table: $crate::TryTable } => visit_try_table (arity try_table -> ~try_table) - @exceptions Throw { tag_index: u32 } => visit_throw (arity tag -> 0) - @exceptions ThrowRef => visit_throw_ref (arity 1 -> 0) + @exceptions { + TryTable { try_table: $crate::TryTable } => visit_try_table (arity try_table -> ~try_table) + Throw { tag_index: u32 } => visit_throw (arity tag -> 0) + ThrowRef => visit_throw_ref (arity 1 -> 0) + } // Deprecated old instructions from the exceptions proposal - @legacy_exceptions Try { blockty: $crate::BlockType } => visit_try (arity block -> ~block) - @legacy_exceptions Catch { tag_index: u32 } => visit_catch (arity ~end -> ~tag) - @legacy_exceptions Rethrow { relative_depth: u32 } => visit_rethrow (arity 0 -> 0) - @legacy_exceptions Delegate { relative_depth: u32 } => visit_delegate (arity ~end -> end) - @legacy_exceptions CatchAll => visit_catch_all (arity ~end -> 0) + @legacy_exceptions { + Try { blockty: $crate::BlockType } => visit_try (arity block -> ~block) + Catch { tag_index: u32 } => visit_catch (arity ~end -> ~tag) + Rethrow { relative_depth: u32 } => visit_rethrow (arity 0 -> 0) + Delegate { relative_depth: u32 } => visit_delegate (arity ~end -> end) + CatchAll => visit_catch_all (arity ~end -> 0) + } // Also 0xFE prefixed operators // shared-everything threads // https://github.com/WebAssembly/shared-everything-threads - @shared_everything_threads GlobalAtomicGet { ordering: $crate::Ordering, global_index: u32 } => visit_global_atomic_get (arity 0 -> 1) - @shared_everything_threads GlobalAtomicSet { ordering: $crate::Ordering, global_index: u32 } => visit_global_atomic_set (arity 1 -> 0) - 
@shared_everything_threads GlobalAtomicRmwAdd { ordering: $crate::Ordering, global_index: u32 } => visit_global_atomic_rmw_add (unary atomic global) - @shared_everything_threads GlobalAtomicRmwSub { ordering: $crate::Ordering, global_index: u32 } => visit_global_atomic_rmw_sub (unary atomic global) - @shared_everything_threads GlobalAtomicRmwAnd { ordering: $crate::Ordering, global_index: u32 } => visit_global_atomic_rmw_and (unary atomic global) - @shared_everything_threads GlobalAtomicRmwOr { ordering: $crate::Ordering, global_index: u32 } => visit_global_atomic_rmw_or (unary atomic global) - @shared_everything_threads GlobalAtomicRmwXor { ordering: $crate::Ordering, global_index: u32 } => visit_global_atomic_rmw_xor (unary atomic global) - @shared_everything_threads GlobalAtomicRmwXchg { ordering: $crate::Ordering, global_index: u32 } => visit_global_atomic_rmw_xchg (arity 1 -> 1) - @shared_everything_threads GlobalAtomicRmwCmpxchg { ordering: $crate::Ordering, global_index: u32 } => visit_global_atomic_rmw_cmpxchg (arity 2 -> 1) - @shared_everything_threads TableAtomicGet { ordering: $crate::Ordering, table_index: u32 } => visit_table_atomic_get (arity 1 -> 1) - @shared_everything_threads TableAtomicSet { ordering: $crate::Ordering, table_index: u32 } => visit_table_atomic_set (arity 2 -> 0) - @shared_everything_threads TableAtomicRmwXchg { ordering: $crate::Ordering, table_index: u32 } => visit_table_atomic_rmw_xchg (arity 2 -> 1) - @shared_everything_threads TableAtomicRmwCmpxchg { ordering: $crate::Ordering, table_index: u32 } => visit_table_atomic_rmw_cmpxchg (arity 3 -> 1) - @shared_everything_threads StructAtomicGet { ordering: $crate::Ordering, struct_type_index: u32, field_index: u32 } => visit_struct_atomic_get (arity 1 -> 1) - @shared_everything_threads StructAtomicGetS { ordering: $crate::Ordering, struct_type_index: u32, field_index: u32 } => visit_struct_atomic_get_s (arity 1 -> 1) - @shared_everything_threads StructAtomicGetU { ordering: 
$crate::Ordering, struct_type_index: u32, field_index: u32 } => visit_struct_atomic_get_u (arity 1 -> 1) - @shared_everything_threads StructAtomicSet { ordering: $crate::Ordering, struct_type_index: u32, field_index: u32 } => visit_struct_atomic_set (arity 2 -> 0) - @shared_everything_threads StructAtomicRmwAdd { ordering: $crate::Ordering, struct_type_index: u32, field_index: u32 } => visit_struct_atomic_rmw_add (atomic rmw struct add) - @shared_everything_threads StructAtomicRmwSub { ordering: $crate::Ordering, struct_type_index: u32, field_index: u32 } => visit_struct_atomic_rmw_sub (atomic rmw struct sub) - @shared_everything_threads StructAtomicRmwAnd { ordering: $crate::Ordering, struct_type_index: u32, field_index: u32 } => visit_struct_atomic_rmw_and (atomic rmw struct and) - @shared_everything_threads StructAtomicRmwOr { ordering: $crate::Ordering, struct_type_index: u32, field_index: u32 } => visit_struct_atomic_rmw_or (atomic rmw struct or) - @shared_everything_threads StructAtomicRmwXor { ordering: $crate::Ordering, struct_type_index: u32, field_index: u32 } => visit_struct_atomic_rmw_xor (atomic rmw struct xor) - @shared_everything_threads StructAtomicRmwXchg { ordering: $crate::Ordering, struct_type_index: u32, field_index: u32 } => visit_struct_atomic_rmw_xchg (arity 2 -> 1) - @shared_everything_threads StructAtomicRmwCmpxchg { ordering: $crate::Ordering, struct_type_index: u32, field_index: u32 } => visit_struct_atomic_rmw_cmpxchg (arity 3 -> 1) - @shared_everything_threads ArrayAtomicGet { ordering: $crate::Ordering, array_type_index: u32 } => visit_array_atomic_get (arity 2 -> 1) - @shared_everything_threads ArrayAtomicGetS { ordering: $crate::Ordering, array_type_index: u32 } => visit_array_atomic_get_s (arity 2 -> 1) - @shared_everything_threads ArrayAtomicGetU { ordering: $crate::Ordering, array_type_index: u32 } => visit_array_atomic_get_u (arity 2 -> 1) - @shared_everything_threads ArrayAtomicSet { ordering: $crate::Ordering, 
array_type_index: u32 } => visit_array_atomic_set (arity 3 -> 0) - @shared_everything_threads ArrayAtomicRmwAdd { ordering: $crate::Ordering, array_type_index: u32 } => visit_array_atomic_rmw_add (atomic rmw array add) - @shared_everything_threads ArrayAtomicRmwSub { ordering: $crate::Ordering, array_type_index: u32 } => visit_array_atomic_rmw_sub (atomic rmw array sub) - @shared_everything_threads ArrayAtomicRmwAnd { ordering: $crate::Ordering, array_type_index: u32 } => visit_array_atomic_rmw_and (atomic rmw array and) - @shared_everything_threads ArrayAtomicRmwOr { ordering: $crate::Ordering, array_type_index: u32 } => visit_array_atomic_rmw_or (atomic rmw array or) - @shared_everything_threads ArrayAtomicRmwXor { ordering: $crate::Ordering, array_type_index: u32 } => visit_array_atomic_rmw_xor (atomic rmw array xor) - @shared_everything_threads ArrayAtomicRmwXchg { ordering: $crate::Ordering, array_type_index: u32 } => visit_array_atomic_rmw_xchg (arity 3 -> 1) - @shared_everything_threads ArrayAtomicRmwCmpxchg { ordering: $crate::Ordering, array_type_index: u32 } => visit_array_atomic_rmw_cmpxchg (arity 4 -> 1) - @shared_everything_threads RefI31Shared => visit_ref_i31_shared (arity 1 -> 1) + @shared_everything_threads { + GlobalAtomicGet { ordering: $crate::Ordering, global_index: u32 } => visit_global_atomic_get (arity 0 -> 1) + GlobalAtomicSet { ordering: $crate::Ordering, global_index: u32 } => visit_global_atomic_set (arity 1 -> 0) + GlobalAtomicRmwAdd { ordering: $crate::Ordering, global_index: u32 } => visit_global_atomic_rmw_add (unary atomic global) + GlobalAtomicRmwSub { ordering: $crate::Ordering, global_index: u32 } => visit_global_atomic_rmw_sub (unary atomic global) + GlobalAtomicRmwAnd { ordering: $crate::Ordering, global_index: u32 } => visit_global_atomic_rmw_and (unary atomic global) + GlobalAtomicRmwOr { ordering: $crate::Ordering, global_index: u32 } => visit_global_atomic_rmw_or (unary atomic global) + GlobalAtomicRmwXor { ordering: 
$crate::Ordering, global_index: u32 } => visit_global_atomic_rmw_xor (unary atomic global) + GlobalAtomicRmwXchg { ordering: $crate::Ordering, global_index: u32 } => visit_global_atomic_rmw_xchg (arity 1 -> 1) + GlobalAtomicRmwCmpxchg { ordering: $crate::Ordering, global_index: u32 } => visit_global_atomic_rmw_cmpxchg (arity 2 -> 1) + TableAtomicGet { ordering: $crate::Ordering, table_index: u32 } => visit_table_atomic_get (arity 1 -> 1) + TableAtomicSet { ordering: $crate::Ordering, table_index: u32 } => visit_table_atomic_set (arity 2 -> 0) + TableAtomicRmwXchg { ordering: $crate::Ordering, table_index: u32 } => visit_table_atomic_rmw_xchg (arity 2 -> 1) + TableAtomicRmwCmpxchg { ordering: $crate::Ordering, table_index: u32 } => visit_table_atomic_rmw_cmpxchg (arity 3 -> 1) + StructAtomicGet { ordering: $crate::Ordering, struct_type_index: u32, field_index: u32 } => visit_struct_atomic_get (arity 1 -> 1) + StructAtomicGetS { ordering: $crate::Ordering, struct_type_index: u32, field_index: u32 } => visit_struct_atomic_get_s (arity 1 -> 1) + StructAtomicGetU { ordering: $crate::Ordering, struct_type_index: u32, field_index: u32 } => visit_struct_atomic_get_u (arity 1 -> 1) + StructAtomicSet { ordering: $crate::Ordering, struct_type_index: u32, field_index: u32 } => visit_struct_atomic_set (arity 2 -> 0) + StructAtomicRmwAdd { ordering: $crate::Ordering, struct_type_index: u32, field_index: u32 } => visit_struct_atomic_rmw_add (atomic rmw struct add) + StructAtomicRmwSub { ordering: $crate::Ordering, struct_type_index: u32, field_index: u32 } => visit_struct_atomic_rmw_sub (atomic rmw struct sub) + StructAtomicRmwAnd { ordering: $crate::Ordering, struct_type_index: u32, field_index: u32 } => visit_struct_atomic_rmw_and (atomic rmw struct and) + StructAtomicRmwOr { ordering: $crate::Ordering, struct_type_index: u32, field_index: u32 } => visit_struct_atomic_rmw_or (atomic rmw struct or) + StructAtomicRmwXor { ordering: $crate::Ordering, struct_type_index: u32, 
field_index: u32 } => visit_struct_atomic_rmw_xor (atomic rmw struct xor) + StructAtomicRmwXchg { ordering: $crate::Ordering, struct_type_index: u32, field_index: u32 } => visit_struct_atomic_rmw_xchg (arity 2 -> 1) + StructAtomicRmwCmpxchg { ordering: $crate::Ordering, struct_type_index: u32, field_index: u32 } => visit_struct_atomic_rmw_cmpxchg (arity 3 -> 1) + ArrayAtomicGet { ordering: $crate::Ordering, array_type_index: u32 } => visit_array_atomic_get (arity 2 -> 1) + ArrayAtomicGetS { ordering: $crate::Ordering, array_type_index: u32 } => visit_array_atomic_get_s (arity 2 -> 1) + ArrayAtomicGetU { ordering: $crate::Ordering, array_type_index: u32 } => visit_array_atomic_get_u (arity 2 -> 1) + ArrayAtomicSet { ordering: $crate::Ordering, array_type_index: u32 } => visit_array_atomic_set (arity 3 -> 0) + ArrayAtomicRmwAdd { ordering: $crate::Ordering, array_type_index: u32 } => visit_array_atomic_rmw_add (atomic rmw array add) + ArrayAtomicRmwSub { ordering: $crate::Ordering, array_type_index: u32 } => visit_array_atomic_rmw_sub (atomic rmw array sub) + ArrayAtomicRmwAnd { ordering: $crate::Ordering, array_type_index: u32 } => visit_array_atomic_rmw_and (atomic rmw array and) + ArrayAtomicRmwOr { ordering: $crate::Ordering, array_type_index: u32 } => visit_array_atomic_rmw_or (atomic rmw array or) + ArrayAtomicRmwXor { ordering: $crate::Ordering, array_type_index: u32 } => visit_array_atomic_rmw_xor (atomic rmw array xor) + ArrayAtomicRmwXchg { ordering: $crate::Ordering, array_type_index: u32 } => visit_array_atomic_rmw_xchg (arity 3 -> 1) + ArrayAtomicRmwCmpxchg { ordering: $crate::Ordering, array_type_index: u32 } => visit_array_atomic_rmw_cmpxchg (arity 4 -> 1) + RefI31Shared => visit_ref_i31_shared (arity 1 -> 1) + } // Typed Function references - @function_references CallRef { type_index: u32 } => visit_call_ref (arity 1 type -> type) - @function_references ReturnCallRef { type_index: u32 } => visit_return_call_ref (arity 1 type -> 0) - 
@function_references RefAsNonNull => visit_ref_as_non_null (arity 1 -> 1) - @function_references BrOnNull { relative_depth: u32 } => visit_br_on_null (arity 1 br -> 1 br) - @function_references BrOnNonNull { relative_depth: u32 } => visit_br_on_non_null (arity br -> br -1) + @function_references { + CallRef { type_index: u32 } => visit_call_ref (arity 1 type -> type) + ReturnCallRef { type_index: u32 } => visit_return_call_ref (arity 1 type -> 0) + RefAsNonNull => visit_ref_as_non_null (arity 1 -> 1) + BrOnNull { relative_depth: u32 } => visit_br_on_null (arity 1 br -> 1 br) + BrOnNonNull { relative_depth: u32 } => visit_br_on_non_null (arity br -> br -1) + } // Stack switching - @stack_switching ContNew { cont_type_index: u32 } => visit_cont_new (arity 1 -> 1) - @stack_switching ContBind { argument_index: u32, result_index: u32 } => visit_cont_bind (arity type_diff 1 -> 1) - @stack_switching Suspend { tag_index: u32 } => visit_suspend (arity tag -> tag) - @stack_switching Resume { cont_type_index: u32, resume_table: $crate::ResumeTable } => visit_resume (arity 1 type -> type) - @stack_switching ResumeThrow { cont_type_index: u32, tag_index: u32, resume_table: $crate::ResumeTable } => visit_resume_throw (arity 1 tag -> type) - @stack_switching Switch { cont_type_index: u32, tag_index: u32 } => visit_switch (arity type -> ~switch) + @stack_switching { + ContNew { cont_type_index: u32 } => visit_cont_new (arity 1 -> 1) + ContBind { argument_index: u32, result_index: u32 } => visit_cont_bind (arity type_diff 1 -> 1) + Suspend { tag_index: u32 } => visit_suspend (arity tag -> tag) + Resume { cont_type_index: u32, resume_table: $crate::ResumeTable } => visit_resume (arity 1 type -> type) + ResumeThrow { cont_type_index: u32, tag_index: u32, resume_table: $crate::ResumeTable } => visit_resume_throw (arity 1 tag -> type) + Switch { cont_type_index: u32, tag_index: u32 } => visit_switch (arity type -> ~switch) + } - @wide_arithmetic I64Add128 => visit_i64_add128 (arity 4 
-> 2) - @wide_arithmetic I64Sub128 => visit_i64_sub128 (arity 4 -> 2) - @wide_arithmetic I64MulWideS => visit_i64_mul_wide_s (arity 2 -> 2) - @wide_arithmetic I64MulWideU => visit_i64_mul_wide_u (arity 2 -> 2) + @wide_arithmetic { + I64Add128 => visit_i64_add128 (arity 4 -> 2) + I64Sub128 => visit_i64_sub128 (arity 4 -> 2) + I64MulWideS => visit_i64_mul_wide_s (arity 2 -> 2) + I64MulWideU => visit_i64_mul_wide_u (arity 2 -> 2) + } } }; } @@ -743,24 +776,33 @@ macro_rules! define_for_each_non_simd_operator { ( filter [$($t:tt)*] - @simd $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) $($rest:tt)* + @simd { + $( $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) )* + } + $($rest:tt)* ) => { define_for_each_non_simd_operator!(filter [$($t)*] $($rest)*); }; ( filter [$($t:tt)*] - @relaxed_simd $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) $($rest:tt)* + @relaxed_simd { + $( $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) )* + } + $($rest:tt)* ) => { define_for_each_non_simd_operator!(filter [$($t)*] $($rest)*); }; ( filter [$($t:tt)*] - @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) $($rest:tt)* + @$proposal:ident { + $( $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) )* + } + $($rest:tt)* ) => { define_for_each_non_simd_operator!( filter [ $($t)* - @$proposal $op $({ $($arg: $argty),* })? => $visit ($($ann)*) + $( @$proposal $op $({ $($arg: $argty),* })? => $visit ($($ann)*) )* ] $($rest)* ); @@ -931,31 +973,40 @@ macro_rules! define_for_each_simd_operator { ( filter [$($t:tt)*] - @simd $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) $($rest:tt)* + @simd { + $( $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) )* + } + $($rest:tt)* ) => { define_for_each_simd_operator!( filter [ $($t)* - @simd $op $({ $($arg: $argty),* })? 
=> $visit ($($ann)*) + $( @simd $op $({ $($arg: $argty),* })? => $visit ($($ann)*) )* ] $($rest)* ); }; ( filter [$($t:tt)*] - @relaxed_simd $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) $($rest:tt)* + @relaxed_simd { + $( $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) )* + } + $($rest:tt)* ) => { define_for_each_simd_operator!( filter [ $($t)* - @relaxed_simd $op $({ $($arg: $argty),* })? => $visit ($($ann)*) + $( @relaxed_simd $op $({ $($arg: $argty),* })? => $visit ($($ann)*) )* ] $($rest)* ); }; ( filter [$($t:tt)*] - @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) $($rest:tt)* + @$proposal:ident { + $( $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) )* + } + $($rest:tt)* ) => { define_for_each_simd_operator!(filter [$($t)*] $($rest)*); }; From 0123e1ccf752132aa411adf2ac254c773e3d1518 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Mon, 25 Nov 2024 14:40:31 +0100 Subject: [PATCH 59/83] apply rustfmt --- crates/wasmparser/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/wasmparser/src/lib.rs b/crates/wasmparser/src/lib.rs index 82b11f9eb5..5e4488e6e9 100644 --- a/crates/wasmparser/src/lib.rs +++ b/crates/wasmparser/src/lib.rs @@ -242,7 +242,7 @@ macro_rules! _for_each_operator { I64Extend16S => visit_i64_extend16_s (unary i64) I64Extend32S => visit_i64_extend32_s (unary i64) } - + // 0xFB prefixed operators // Garbage Collection // http://github.com/WebAssembly/gc From 88e447ad168c6bb6a9219cd9e4d06d6486967b7a Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 26 Nov 2024 15:03:32 +0100 Subject: [PATCH 60/83] re-structure the new exposed macros The wasmparser crate now exposes 3 different for_each macros: - for_each_operator: Same as before the PR. Iterates over _all_ operators. This is going to be used to implement a routine for all Operator enum variants. 
- for_each_visit_oeprator: This is going to be used to implement the `VisitOperator` trait. - for_each_visit_simd_operator: This is going to be used to implement the `VisitSimdOperator` trait. --- crates/wasmparser/src/arity.rs | 4 +- crates/wasmparser/src/binary_reader.rs | 4 +- crates/wasmparser/src/lib.rs | 578 ++++++++++++------ .../wasmparser/src/readers/core/operators.rs | 20 +- crates/wasmparser/src/validator/core.rs | 4 +- crates/wasmparser/src/validator/operators.rs | 4 +- 6 files changed, 419 insertions(+), 195 deletions(-) diff --git a/crates/wasmparser/src/arity.rs b/crates/wasmparser/src/arity.rs index ebd7da4512..2a608a2d7b 100644 --- a/crates/wasmparser/src/arity.rs +++ b/crates/wasmparser/src/arity.rs @@ -257,7 +257,7 @@ impl Operator<'_> { } ); } - for_each_operator!(define_arity) + for_each_visit_operator!(define_arity) } } @@ -280,6 +280,6 @@ impl SimdOperator { } ); } - for_each_simd_operator!(define_arity) + for_each_visit_simd_operator!(define_arity) } } diff --git a/crates/wasmparser/src/binary_reader.rs b/crates/wasmparser/src/binary_reader.rs index 3c0120ba87..4362680464 100644 --- a/crates/wasmparser/src/binary_reader.rs +++ b/crates/wasmparser/src/binary_reader.rs @@ -1800,7 +1800,7 @@ impl<'a> VisitOperator<'a> for OperatorFactory<'a> { Some(self) } - for_each_operator!(define_visit_operator); + for_each_visit_operator!(define_visit_operator); } #[cfg(feature = "simd")] @@ -1816,7 +1816,7 @@ macro_rules! define_visit_simd_operator { #[cfg(feature = "simd")] impl<'a> VisitSimdOperator<'a> for OperatorFactory<'a> { - for_each_simd_operator!(define_visit_simd_operator); + for_each_visit_simd_operator!(define_visit_simd_operator); } /// Iterator returned from [`BinaryReader::read_iter`]. 
diff --git a/crates/wasmparser/src/lib.rs b/crates/wasmparser/src/lib.rs index 5e4488e6e9..408fa64b68 100644 --- a/crates/wasmparser/src/lib.rs +++ b/crates/wasmparser/src/lib.rs @@ -55,9 +55,8 @@ mod prelude { pub use crate::collections::{IndexMap, Map, Set}; } -#[macro_export] #[doc(hidden)] -macro_rules! _for_each_operator { +macro_rules! _for_each_operator_group { ($mac:ident) => { $mac! { @mvp { @@ -771,6 +770,32 @@ macro_rules! _for_each_operator { }; } +#[cfg(feature = "simd")] +macro_rules! define_for_each_operator { + ( + $( + @$proposal:ident { + $( $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) )* + } + )* + ) => { + #[doc(hidden)] + macro_rules! _for_each_operator { + ($m:ident) => { + $m! { + $( + $( + @$proposal $op $({$($arg: $argty),*})? => $visit ($($ann)*) + )* + )* + } + } + } + }; +} +#[cfg(feature = "simd")] +_for_each_operator_group!(define_for_each_operator); + macro_rules! define_for_each_non_simd_operator { (@ $($t:tt)*) => {define_for_each_non_simd_operator!(filter [] @ $($t)*);}; @@ -809,163 +834,15 @@ macro_rules! define_for_each_non_simd_operator { }; (filter [$($t:tt)*]) => { - /// A helper macro to conveniently iterate over all opcodes recognized by this - /// crate. This can be used to work with either the [`Operator`] enumeration or - /// the [`VisitOperator`] trait if your use case uniformly handles all operators - /// the same way. - /// - /// Note: SIMD operators are handled by the [`for_each_simd_operator`] macro. - /// - /// It is also possible to specialize handling of operators depending on the - /// Wasm proposal from which they are originating. - /// - /// This is an "iterator macro" where this macro is invoked with the name of - /// another macro, and then that macro is invoked with the list of all - /// operators. 
An example invocation of this looks like: - /// - /// The list of specializable Wasm proposals is as follows: - /// - /// - `@mvp`: Denoting a Wasm operator from the initial Wasm MVP version. - /// - `@exceptions`: [Wasm `exception-handling` proposal] - /// - `@tail_call`: [Wasm `tail-calls` proposal] - /// - `@reference_types`: [Wasm `reference-types` proposal] - /// - `@sign_extension`: [Wasm `sign-extension-ops` proposal] - /// - `@saturating_float_to_int`: [Wasm `non_trapping_float-to-int-conversions` proposal] - /// - `@bulk_memory `:[Wasm `bulk-memory` proposal] - /// - `@threads`: [Wasm `threads` proposal] - /// - `@gc`: [Wasm `gc` proposal] - /// - `@stack_switching`: [Wasm `stack-switching` proposal] - /// - `@wide_arithmetic`: [Wasm `wide-arithmetic` proposal] - /// - /// [Wasm `exception-handling` proposal]: - /// https://github.com/WebAssembly/exception-handling - /// - /// [Wasm `tail-calls` proposal]: - /// https://github.com/WebAssembly/tail-call - /// - /// [Wasm `reference-types` proposal]: - /// https://github.com/WebAssembly/reference-types - /// - /// [Wasm `sign-extension-ops` proposal]: - /// https://github.com/WebAssembly/sign-extension-ops - /// - /// [Wasm `non_trapping_float-to-int-conversions` proposal]: - /// https://github.com/WebAssembly/nontrapping-float-to-int-conversions - /// - /// [Wasm `bulk-memory` proposal]: - /// https://github.com/WebAssembly/bulk-memory-operations - /// - /// [Wasm `threads` proposal]: - /// https://github.com/webassembly/threads - /// - /// [Wasm `gc` proposal]: - /// https://github.com/WebAssembly/gc - /// - /// [Wasm `stack-switching` proposal]: - /// https://github.com/WebAssembly/stack-switching - /// - /// [Wasm `wide-arithmetic` proposal]: - /// https://github.com/WebAssembly/wide-arithmetic - /// - /// ``` - /// macro_rules! define_visit_operator { - /// // The outer layer of repetition represents how all operators are - /// // provided to the macro at the same time. 
- /// // - /// // The `$proposal` identifier indicates the Wasm proposals from which - /// // the Wasm operator is originating. - /// // For example to specialize the macro match arm for Wasm SIMD proposal - /// // operators you could write `@simd` instead of `@$proposal:ident` to - /// // only catch those operators. - /// // - /// // The `$op` name is bound to the `Operator` variant name. The - /// // payload of the operator is optionally specified (the `$(...)?` - /// // clause) since not all instructions have payloads. Within the payload - /// // each argument is named and has its type specified. - /// // - /// // The `$visit` name is bound to the corresponding name in the - /// // `VisitOperator` trait that this corresponds to. - /// // - /// // The `$ann` annotations give information about the operator's type (e.g. binary i32 or arity 2 -> 1). - /// ($( @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { - /// $( - /// fn $visit(&mut self $($(,$arg: $argty)*)?) { - /// // do nothing for this example - /// } - /// )* - /// } - /// } - /// - /// pub struct VisitAndDoNothing; - /// - /// impl<'a> wasmparser::VisitOperator<'a> for VisitAndDoNothing { - /// type Output = (); - /// - /// wasmparser::for_each_operator!(define_visit_operator); - /// } - /// ``` - /// - /// If you only wanted to visit the initial base set of wasm instructions, for - /// example, you could do: - /// - /// ``` - /// macro_rules! visit_only_mvp { - /// // delegate the macro invocation to sub-invocations of this macro to - /// // deal with each instruction on a case-by-case basis. - /// ($( @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { - /// $( - /// visit_only_mvp!(visit_one @$proposal $op $({ $($arg: $argty),* })? => $visit); - /// )* - /// }; - /// - /// // MVP instructions are defined manually, so do nothing. 
- /// (visit_one @mvp $($rest:tt)*) => {}; - /// - /// // Non-MVP instructions all return `false` here. The exact type depends - /// // on `type Output` in the trait implementation below. You could change - /// // it to `Result<()>` for example and return an error here too. - /// (visit_one @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident) => { - /// fn $visit(&mut self $($(,$arg: $argty)*)?) -> bool { - /// false - /// } - /// } - /// } - /// # // to get this example to compile another macro is used here to define - /// # // visit methods for all mvp oeprators. - /// # macro_rules! visit_mvp { - /// # ($( @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { - /// # $( - /// # visit_mvp!(visit_one @$proposal $op $({ $($arg: $argty),* })? => $visit); - /// # )* - /// # }; - /// # (visit_one @mvp $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident) => { - /// # fn $visit(&mut self $($(,$arg: $argty)*)?) -> bool { - /// # true - /// # } - /// # }; - /// # (visit_one @$proposal:ident $($rest:tt)*) => {}; - /// # } - /// - /// pub struct VisitOnlyMvp; - /// - /// impl<'a> wasmparser::VisitOperator<'a> for VisitOnlyMvp { - /// type Output = bool; - /// - /// wasmparser::for_each_operator!(visit_only_mvp); - /// # wasmparser::for_each_operator!(visit_mvp); - /// - /// // manually define `visit_*` for all MVP operators here - /// } - /// ``` - #[macro_export] - macro_rules! for_each_operator { + #[doc(hidden)] + macro_rules! _for_each_non_simd_operator { ($m:ident) => { $m! { $($t)* } } } }; } -_for_each_operator!(define_for_each_non_simd_operator); +_for_each_operator_group!(define_for_each_non_simd_operator); #[cfg(feature = "simd")] macro_rules! define_for_each_simd_operator { @@ -1012,29 +889,8 @@ macro_rules! define_for_each_simd_operator { }; (filter [$($t:tt)*]) => { - /// A helper macro to conveniently iterate over all opcodes recognized by this - /// crate. 
This can be used to work with either the [`SimdOperator`] enumeration or - /// the [`VisitSimdOperator`] trait if your use case uniformly handles all operators - /// the same way. - /// - /// The list of specializable Wasm proposals is as follows: - /// - /// - `@simd`: [Wasm `simd` proposal] - /// - `@relaxed_simd`: [Wasm `relaxed-simd` proposal] - /// - /// For more information about the structure and use of this macro please - /// refer to the documentation of the [`for_each_operator`] macro. - /// - /// [Wasm `simd` proposal]: - /// https://github.com/webassembly/simd - /// - /// [Wasm `relaxed-simd` proposal]: - /// https://github.com/WebAssembly/relaxed-simd - /// - /// [`SimdOperator`]: crate::SimdOperator - /// [`VisitSimdOperator`]: crate::VisitSimdOperator - #[macro_export] - macro_rules! for_each_simd_operator { + #[doc(hidden)] + macro_rules! _for_each_simd_operator { ($m:ident) => { $m! { $($t)* } } @@ -1042,7 +898,375 @@ macro_rules! define_for_each_simd_operator { }; } #[cfg(feature = "simd")] -_for_each_operator!(define_for_each_simd_operator); +_for_each_operator_group!(define_for_each_simd_operator); + +#[cfg(feature = "simd")] +#[doc(hidden)] +macro_rules! _for_each_operator_delegate { + ($mac:ident) => { + _for_each_operator! { $mac } + }; +} + +#[cfg(not(feature = "simd"))] +#[doc(hidden)] +macro_rules! _for_each_operator_delegate { + ($mac:ident) => { + _for_each_non_simd_operator! { $mac } + }; +} + +/// A helper macro to conveniently iterate over all opcodes recognized by this +/// crate. This can be used to work with either the [`Operator`] enumeration or +/// the [`VisitOperator`] trait if your use case uniformly handles all operators +/// the same way. +/// +/// Note: SIMD operators are handled by the [`for_each_simd_operator`] macro. +/// +/// It is also possible to specialize handling of operators depending on the +/// Wasm proposal from which they are originating. 
+/// +/// This is an "iterator macro" where this macro is invoked with the name of +/// another macro, and then that macro is invoked with the list of all +/// operators. An example invocation of this looks like: +/// +/// The list of specializable Wasm proposals is as follows: +/// +/// - `@mvp`: Denoting a Wasm operator from the initial Wasm MVP version. +/// - `@exceptions`: [Wasm `exception-handling` proposal] +/// - `@tail_call`: [Wasm `tail-calls` proposal] +/// - `@reference_types`: [Wasm `reference-types` proposal] +/// - `@sign_extension`: [Wasm `sign-extension-ops` proposal] +/// - `@saturating_float_to_int`: [Wasm `non_trapping_float-to-int-conversions` proposal] +/// - `@bulk_memory `:[Wasm `bulk-memory` proposal] +/// - `@simd`: [Wasm `simd` proposal] +/// - `@relaxed_simd`: [Wasm `relaxed-simd` proposal] +/// - `@threads`: [Wasm `threads` proposal] +/// - `@gc`: [Wasm `gc` proposal] +/// - `@stack_switching`: [Wasm `stack-switching` proposal] +/// - `@wide_arithmetic`: [Wasm `wide-arithmetic` proposal] +/// +/// [Wasm `exception-handling` proposal]: +/// https://github.com/WebAssembly/exception-handling +/// +/// [Wasm `tail-calls` proposal]: +/// https://github.com/WebAssembly/tail-call +/// +/// [Wasm `reference-types` proposal]: +/// https://github.com/WebAssembly/reference-types +/// +/// [Wasm `sign-extension-ops` proposal]: +/// https://github.com/WebAssembly/sign-extension-ops +/// +/// [Wasm `non_trapping_float-to-int-conversions` proposal]: +/// https://github.com/WebAssembly/nontrapping-float-to-int-conversions +/// +/// [Wasm `bulk-memory` proposal]: +/// https://github.com/WebAssembly/bulk-memory-operations +/// +/// [Wasm `simd` proposal]: +/// https://github.com/webassembly/simd +/// +/// [Wasm `relaxed-simd` proposal]: +/// https://github.com/WebAssembly/relaxed-simd +/// +/// [Wasm `threads` proposal]: +/// https://github.com/webassembly/threads +/// +/// [Wasm `gc` proposal]: +/// https://github.com/WebAssembly/gc +/// +/// [Wasm 
`stack-switching` proposal]: +/// https://github.com/WebAssembly/stack-switching +/// +/// [Wasm `wide-arithmetic` proposal]: +/// https://github.com/WebAssembly/wide-arithmetic +/// +/// ``` +/// macro_rules! define_visit_operator { +/// // The outer layer of repetition represents how all operators are +/// // provided to the macro at the same time. +/// // +/// // The `$proposal` identifier indicates the Wasm proposals from which +/// // the Wasm operator is originating. +/// // For example to specialize the macro match arm for Wasm SIMD proposal +/// // operators you could write `@simd` instead of `@$proposal:ident` to +/// // only catch those operators. +/// // +/// // The `$op` name is bound to the `Operator` variant name. The +/// // payload of the operator is optionally specified (the `$(...)?` +/// // clause) since not all instructions have payloads. Within the payload +/// // each argument is named and has its type specified. +/// // +/// // The `$visit` name is bound to the corresponding name in the +/// // `VisitOperator` trait that this corresponds to. +/// // +/// // The `$ann` annotations give information about the operator's type (e.g. binary i32 or arity 2 -> 1). +/// ($( @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { +/// $( +/// fn $visit(&mut self $($(,$arg: $argty)*)?) { +/// // do nothing for this example +/// } +/// )* +/// } +/// } +/// +/// pub struct VisitAndDoNothing; +/// +/// impl<'a> wasmparser::VisitOperator<'a> for VisitAndDoNothing { +/// type Output = (); +/// +/// wasmparser::for_each_operator!(define_visit_operator); +/// } +/// ``` +/// +/// If you only wanted to visit the initial base set of wasm instructions, for +/// example, you could do: +/// +/// ``` +/// macro_rules! visit_only_mvp { +/// // delegate the macro invocation to sub-invocations of this macro to +/// // deal with each instruction on a case-by-case basis. 
+/// ($( @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { +/// $( +/// visit_only_mvp!(visit_one @$proposal $op $({ $($arg: $argty),* })? => $visit); +/// )* +/// }; +/// +/// // MVP instructions are defined manually, so do nothing. +/// (visit_one @mvp $($rest:tt)*) => {}; +/// +/// // Non-MVP instructions all return `false` here. The exact type depends +/// // on `type Output` in the trait implementation below. You could change +/// // it to `Result<()>` for example and return an error here too. +/// (visit_one @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident) => { +/// fn $visit(&mut self $($(,$arg: $argty)*)?) -> bool { +/// false +/// } +/// } +/// } +/// # // to get this example to compile another macro is used here to define +/// # // visit methods for all mvp oeprators. +/// # macro_rules! visit_mvp { +/// # ($( @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { +/// # $( +/// # visit_mvp!(visit_one @$proposal $op $({ $($arg: $argty),* })? => $visit); +/// # )* +/// # }; +/// # (visit_one @mvp $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident) => { +/// # fn $visit(&mut self $($(,$arg: $argty)*)?) -> bool { +/// # true +/// # } +/// # }; +/// # (visit_one @$proposal:ident $($rest:tt)*) => {}; +/// # } +/// +/// pub struct VisitOnlyMvp; +/// +/// impl<'a> wasmparser::VisitOperator<'a> for VisitOnlyMvp { +/// type Output = bool; +/// +/// wasmparser::for_each_operator!(visit_only_mvp); +/// # wasmparser::for_each_operator!(visit_mvp); +/// +/// // manually define `visit_*` for all MVP operators here +/// } +/// ``` +#[macro_export] +macro_rules! for_each_operator { + ($mac:ident) => { + _for_each_operator_delegate! { $mac } + }; +} + +/// A helper macro to conveniently iterate over all opcodes recognized by this +/// crate. 
This can be used to work with either the [`Operator`] enumeration or +/// the [`VisitOperator`] trait if your use case uniformly handles all operators +/// the same way. +/// +/// Note: SIMD operators are handled by the [`for_each_simd_operator`] macro. +/// +/// It is also possible to specialize handling of operators depending on the +/// Wasm proposal from which they are originating. +/// +/// This is an "iterator macro" where this macro is invoked with the name of +/// another macro, and then that macro is invoked with the list of all +/// operators. An example invocation of this looks like: +/// +/// The list of specializable Wasm proposals is as follows: +/// +/// - `@mvp`: Denoting a Wasm operator from the initial Wasm MVP version. +/// - `@exceptions`: [Wasm `exception-handling` proposal] +/// - `@tail_call`: [Wasm `tail-calls` proposal] +/// - `@reference_types`: [Wasm `reference-types` proposal] +/// - `@sign_extension`: [Wasm `sign-extension-ops` proposal] +/// - `@saturating_float_to_int`: [Wasm `non_trapping_float-to-int-conversions` proposal] +/// - `@bulk_memory `:[Wasm `bulk-memory` proposal] +/// - `@threads`: [Wasm `threads` proposal] +/// - `@gc`: [Wasm `gc` proposal] +/// - `@stack_switching`: [Wasm `stack-switching` proposal] +/// - `@wide_arithmetic`: [Wasm `wide-arithmetic` proposal] +/// +/// [Wasm `exception-handling` proposal]: +/// https://github.com/WebAssembly/exception-handling +/// +/// [Wasm `tail-calls` proposal]: +/// https://github.com/WebAssembly/tail-call +/// +/// [Wasm `reference-types` proposal]: +/// https://github.com/WebAssembly/reference-types +/// +/// [Wasm `sign-extension-ops` proposal]: +/// https://github.com/WebAssembly/sign-extension-ops +/// +/// [Wasm `non_trapping_float-to-int-conversions` proposal]: +/// https://github.com/WebAssembly/nontrapping-float-to-int-conversions +/// +/// [Wasm `bulk-memory` proposal]: +/// https://github.com/WebAssembly/bulk-memory-operations +/// [Wasm `simd` proposal]: +/// 
https://github.com/webassembly/simd +/// +/// [Wasm `relaxed-simd` proposal]: +/// https://github.com/WebAssembly/relaxed-simd +/// +/// [Wasm `threads` proposal]: +/// https://github.com/webassembly/threads +/// +/// [Wasm `gc` proposal]: +/// https://github.com/WebAssembly/gc +/// +/// [Wasm `stack-switching` proposal]: +/// https://github.com/WebAssembly/stack-switching +/// +/// [Wasm `wide-arithmetic` proposal]: +/// https://github.com/WebAssembly/wide-arithmetic +/// +/// ``` +/// macro_rules! define_visit_operator { +/// // The outer layer of repetition represents how all operators are +/// // provided to the macro at the same time. +/// // +/// // The `$proposal` identifier indicates the Wasm proposals from which +/// // the Wasm operator is originating. +/// // For example to specialize the macro match arm for Wasm SIMD proposal +/// // operators you could write `@simd` instead of `@$proposal:ident` to +/// // only catch those operators. +/// // +/// // The `$op` name is bound to the `Operator` variant name. The +/// // payload of the operator is optionally specified (the `$(...)?` +/// // clause) since not all instructions have payloads. Within the payload +/// // each argument is named and has its type specified. +/// // +/// // The `$visit` name is bound to the corresponding name in the +/// // `VisitOperator` trait that this corresponds to. +/// // +/// // The `$ann` annotations give information about the operator's type (e.g. binary i32 or arity 2 -> 1). +/// ($( @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { +/// $( +/// fn $visit(&mut self $($(,$arg: $argty)*)?) 
{ +/// // do nothing for this example +/// } +/// )* +/// } +/// } +/// +/// pub struct VisitAndDoNothing; +/// +/// impl<'a> wasmparser::VisitOperator<'a> for VisitAndDoNothing { +/// type Output = (); +/// +/// wasmparser::for_each_operator!(define_visit_operator); +/// } +/// ``` +/// +/// If you only wanted to visit the initial base set of wasm instructions, for +/// example, you could do: +/// +/// ``` +/// macro_rules! visit_only_mvp { +/// // delegate the macro invocation to sub-invocations of this macro to +/// // deal with each instruction on a case-by-case basis. +/// ($( @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { +/// $( +/// visit_only_mvp!(visit_one @$proposal $op $({ $($arg: $argty),* })? => $visit); +/// )* +/// }; +/// +/// // MVP instructions are defined manually, so do nothing. +/// (visit_one @mvp $($rest:tt)*) => {}; +/// +/// // Non-MVP instructions all return `false` here. The exact type depends +/// // on `type Output` in the trait implementation below. You could change +/// // it to `Result<()>` for example and return an error here too. +/// (visit_one @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident) => { +/// fn $visit(&mut self $($(,$arg: $argty)*)?) -> bool { +/// false +/// } +/// } +/// } +/// # // to get this example to compile another macro is used here to define +/// # // visit methods for all mvp oeprators. +/// # macro_rules! visit_mvp { +/// # ($( @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { +/// # $( +/// # visit_mvp!(visit_one @$proposal $op $({ $($arg: $argty),* })? => $visit); +/// # )* +/// # }; +/// # (visit_one @mvp $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident) => { +/// # fn $visit(&mut self $($(,$arg: $argty)*)?) 
-> bool { +/// # true +/// # } +/// # }; +/// # (visit_one @$proposal:ident $($rest:tt)*) => {}; +/// # } +/// +/// pub struct VisitOnlyMvp; +/// +/// impl<'a> wasmparser::VisitOperator<'a> for VisitOnlyMvp { +/// type Output = bool; +/// +/// wasmparser::for_each_operator!(visit_only_mvp); +/// # wasmparser::for_each_operator!(visit_mvp); +/// +/// // manually define `visit_*` for all MVP operators here +/// } +/// ``` +#[macro_export] +macro_rules! for_each_visit_operator { + ($mac:ident) => { + _for_each_non_simd_operator! { $mac } + }; +} + +/// A helper macro to conveniently iterate over all opcodes recognized by this +/// crate. This can be used to work with either the [`SimdOperator`] enumeration or +/// the [`VisitSimdOperator`] trait if your use case uniformly handles all operators +/// the same way. +/// +/// The list of specializable Wasm proposals is as follows: +/// +/// - `@simd`: [Wasm `simd` proposal] +/// - `@relaxed_simd`: [Wasm `relaxed-simd` proposal] +/// +/// For more information about the structure and use of this macro please +/// refer to the documentation of the [`for_each_operator`] macro. +/// +/// [Wasm `simd` proposal]: +/// https://github.com/webassembly/simd +/// +/// [Wasm `relaxed-simd` proposal]: +/// https://github.com/WebAssembly/relaxed-simd +/// +/// [`SimdOperator`]: crate::SimdOperator +/// [`VisitSimdOperator`]: crate::VisitSimdOperator +#[cfg(feature = "simd")] +#[macro_export] +macro_rules! for_each_visit_simd_operator { + ($mac:ident) => { + _for_each_simd_operator! { $mac } + }; +} macro_rules! format_err { ($offset:expr, $($arg:tt)*) => { diff --git a/crates/wasmparser/src/readers/core/operators.rs b/crates/wasmparser/src/readers/core/operators.rs index dc769f14ce..b2b7d7e716 100644 --- a/crates/wasmparser/src/readers/core/operators.rs +++ b/crates/wasmparser/src/readers/core/operators.rs @@ -230,7 +230,7 @@ macro_rules! 
define_operator { } } } -for_each_operator!(define_operator); +for_each_visit_operator!(define_operator); #[cfg(feature = "simd")] macro_rules! define_simd_operator { @@ -248,7 +248,7 @@ macro_rules! define_simd_operator { } } #[cfg(feature = "simd")] -for_each_simd_operator!(define_simd_operator); +for_each_visit_simd_operator!(define_simd_operator); /// A reader for a core WebAssembly function's operators. #[derive(Clone)] @@ -454,7 +454,7 @@ pub trait VisitOperator<'a> { } } } - for_each_operator!(visit_operator) + for_each_visit_operator!(visit_operator) } /// Returns a mutable reference to a [`VisitSimdOperator`] visitor. @@ -485,7 +485,7 @@ pub trait VisitOperator<'a> { None } - for_each_operator!(define_visit_operator); + for_each_visit_operator!(define_visit_operator); } /// Trait implemented by types that can visit all [`SimdOperator`] variants. @@ -510,10 +510,10 @@ pub trait VisitSimdOperator<'a>: VisitOperator<'a> { } } } - for_each_simd_operator!(visit_simd_operator) + for_each_visit_simd_operator!(visit_simd_operator) } - for_each_simd_operator!(define_visit_operator); + for_each_visit_simd_operator!(define_visit_operator); } macro_rules! 
define_visit_operator_delegate { @@ -535,7 +535,7 @@ impl<'a, 'b, V: VisitOperator<'a> + ?Sized> VisitOperator<'a> for &'b mut V { fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator<'a, Output = V::Output>> { V::simd_visitor(*self) } - for_each_operator!(define_visit_operator_delegate); + for_each_visit_operator!(define_visit_operator_delegate); } #[cfg(feature = "simd")] @@ -543,7 +543,7 @@ impl<'a, 'b, V: VisitSimdOperator<'a> + ?Sized> VisitSimdOperator<'a> for &'b mu fn visit_simd_operator(&mut self, op: &SimdOperator) -> Self::Output { V::visit_simd_operator(*self, op) } - for_each_simd_operator!(define_visit_operator_delegate); + for_each_visit_simd_operator!(define_visit_operator_delegate); } impl<'a, V: VisitOperator<'a> + ?Sized> VisitOperator<'a> for Box { @@ -555,7 +555,7 @@ impl<'a, V: VisitOperator<'a> + ?Sized> VisitOperator<'a> for Box { fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator<'a, Output = V::Output>> { V::simd_visitor(&mut *self) } - for_each_operator!(define_visit_operator_delegate); + for_each_visit_operator!(define_visit_operator_delegate); } #[cfg(feature = "simd")] @@ -563,7 +563,7 @@ impl<'a, V: VisitSimdOperator<'a> + ?Sized> VisitSimdOperator<'a> for Box { fn visit_simd_operator(&mut self, op: &SimdOperator) -> Self::Output { V::visit_simd_operator(&mut *self, op) } - for_each_simd_operator!(define_visit_operator_delegate); + for_each_visit_simd_operator!(define_visit_operator_delegate); } /// A `try_table` entries representation. 
diff --git a/crates/wasmparser/src/validator/core.rs b/crates/wasmparser/src/validator/core.rs index 49e38b82cb..e753602104 100644 --- a/crates/wasmparser/src/validator/core.rs +++ b/crates/wasmparser/src/validator/core.rs @@ -531,12 +531,12 @@ impl ModuleState { Some(self) } - for_each_operator!(define_visit_operator); + for_each_visit_operator!(define_visit_operator); } #[cfg(feature = "simd")] impl<'a> VisitSimdOperator<'a> for VisitConstOperator<'a> { - for_each_simd_operator!(define_visit_operator); + for_each_visit_simd_operator!(define_visit_operator); } } } diff --git a/crates/wasmparser/src/validator/operators.rs b/crates/wasmparser/src/validator/operators.rs index eae075bb93..58525378a7 100644 --- a/crates/wasmparser/src/validator/operators.rs +++ b/crates/wasmparser/src/validator/operators.rs @@ -1760,7 +1760,7 @@ where Some(self) } - for_each_operator!(validate_proposal); + for_each_visit_operator!(validate_proposal); } #[cfg(feature = "simd")] @@ -1768,7 +1768,7 @@ impl<'a, T> VisitSimdOperator<'a> for WasmProposalValidator<'_, '_, T> where T: WasmModuleResources, { - for_each_simd_operator!(validate_proposal); + for_each_visit_simd_operator!(validate_proposal); } #[track_caller] From dd7319b756c47969ade687e552908ba416c95918 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 26 Nov 2024 16:12:48 +0100 Subject: [PATCH 61/83] fix macro doc links --- crates/wasmparser/src/lib.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/crates/wasmparser/src/lib.rs b/crates/wasmparser/src/lib.rs index 408fa64b68..48e3e2a416 100644 --- a/crates/wasmparser/src/lib.rs +++ b/crates/wasmparser/src/lib.rs @@ -916,13 +916,13 @@ macro_rules! _for_each_operator_delegate { }; } +/// Used to implement routines for the `Operator` enum. +/// /// A helper macro to conveniently iterate over all opcodes recognized by this /// crate. 
This can be used to work with either the [`Operator`] enumeration or /// the [`VisitOperator`] trait if your use case uniformly handles all operators /// the same way. /// -/// Note: SIMD operators are handled by the [`for_each_simd_operator`] macro. -/// /// It is also possible to specialize handling of operators depending on the /// Wasm proposal from which they are originating. /// @@ -1079,13 +1079,13 @@ macro_rules! for_each_operator { }; } +/// Used to implement the `VisitOperator` trait. +/// /// A helper macro to conveniently iterate over all opcodes recognized by this /// crate. This can be used to work with either the [`Operator`] enumeration or /// the [`VisitOperator`] trait if your use case uniformly handles all operators /// the same way. /// -/// Note: SIMD operators are handled by the [`for_each_simd_operator`] macro. -/// /// It is also possible to specialize handling of operators depending on the /// Wasm proposal from which they are originating. /// @@ -1239,6 +1239,8 @@ macro_rules! for_each_visit_operator { }; } +/// Used to implement the `VisitSimdOperator` trait. +/// /// A helper macro to conveniently iterate over all opcodes recognized by this /// crate. This can be used to work with either the [`SimdOperator`] enumeration or /// the [`VisitSimdOperator`] trait if your use case uniformly handles all operators From 7e56835e2dd5df9e7151b32d3bee87897e55633b Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 26 Nov 2024 17:07:06 +0100 Subject: [PATCH 62/83] fix broken doc links --- crates/wasmparser/src/lib.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/crates/wasmparser/src/lib.rs b/crates/wasmparser/src/lib.rs index 48e3e2a416..8638790430 100644 --- a/crates/wasmparser/src/lib.rs +++ b/crates/wasmparser/src/lib.rs @@ -1241,11 +1241,6 @@ macro_rules! for_each_visit_operator { /// Used to implement the `VisitSimdOperator` trait. /// -/// A helper macro to conveniently iterate over all opcodes recognized by this -/// crate. 
This can be used to work with either the [`SimdOperator`] enumeration or -/// the [`VisitSimdOperator`] trait if your use case uniformly handles all operators -/// the same way. -/// /// The list of specializable Wasm proposals is as follows: /// /// - `@simd`: [Wasm `simd` proposal] @@ -1260,7 +1255,6 @@ macro_rules! for_each_visit_operator { /// [Wasm `relaxed-simd` proposal]: /// https://github.com/WebAssembly/relaxed-simd /// -/// [`SimdOperator`]: crate::SimdOperator /// [`VisitSimdOperator`]: crate::VisitSimdOperator #[cfg(feature = "simd")] #[macro_export] From cbc4488d217bdc268de94bc0aeb7eb94a6e5bd30 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 26 Nov 2024 17:08:02 +0100 Subject: [PATCH 63/83] fix crate after making Operator enum inline --- crates/wasmparser/src/arity.rs | 29 +----- crates/wasmparser/src/binary_reader.rs | 13 +-- .../wasmparser/src/readers/core/operators.rs | 88 ++++++------------- 3 files changed, 30 insertions(+), 100 deletions(-) diff --git a/crates/wasmparser/src/arity.rs b/crates/wasmparser/src/arity.rs index 2a608a2d7b..6109edd9f5 100644 --- a/crates/wasmparser/src/arity.rs +++ b/crates/wasmparser/src/arity.rs @@ -13,8 +13,6 @@ * limitations under the License. */ -#[cfg(feature = "simd")] -use crate::SimdOperator; use crate::{ BinaryReader, BinaryReaderError, BlockType, CompositeInnerType, ContType, FrameKind, FuncType, Operator, RefType, Result, SubType, @@ -252,34 +250,9 @@ impl Operator<'_> { operator_arity!(arity module $({ $($arg: $argty),* })? $($ann)*) } )* - #[cfg(feature = "simd")] - Self::Simd(operator) => operator.operator_arity(), } ); } - for_each_visit_operator!(define_arity) - } -} - -#[cfg(feature = "simd")] -impl SimdOperator { - /// Compute the arity (param and result counts) of the operator, given - /// an impl ModuleArity, which stores the necessary module state. - pub fn operator_arity(&self) -> Option<(u32, u32)> { - macro_rules! 
define_arity { - ( $(@$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) )*) => ( - match self.clone() { - $( - Self::$op $({ $($arg),* })? => { - $( - $(let _ = $arg;)* - )? - operator_arity!(arity module $({ $($arg: $argty),* })? $($ann)*) - } - )* - } - ); - } - for_each_visit_simd_operator!(define_arity) + for_each_operator!(define_arity) } } diff --git a/crates/wasmparser/src/binary_reader.rs b/crates/wasmparser/src/binary_reader.rs index 4362680464..da25850c70 100644 --- a/crates/wasmparser/src/binary_reader.rs +++ b/crates/wasmparser/src/binary_reader.rs @@ -1803,20 +1803,9 @@ impl<'a> VisitOperator<'a> for OperatorFactory<'a> { for_each_visit_operator!(define_visit_operator); } -#[cfg(feature = "simd")] -macro_rules! define_visit_simd_operator { - ($(@$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { - $( - fn $visit(&mut self $($(,$arg: $argty)*)?) -> Operator<'a> { - Operator::Simd(SimdOperator::$op $({ $($arg),* })?) - } - )* - } -} - #[cfg(feature = "simd")] impl<'a> VisitSimdOperator<'a> for OperatorFactory<'a> { - for_each_visit_simd_operator!(define_visit_simd_operator); + for_each_visit_simd_operator!(define_visit_operator); } /// Iterator returned from [`BinaryReader::read_iter`]. diff --git a/crates/wasmparser/src/readers/core/operators.rs b/crates/wasmparser/src/readers/core/operators.rs index b2b7d7e716..1db6b8f5f6 100644 --- a/crates/wasmparser/src/readers/core/operators.rs +++ b/crates/wasmparser/src/readers/core/operators.rs @@ -225,30 +225,10 @@ macro_rules! define_operator { $( $op $({ $($payload)* })?, )* - #[cfg(feature = "simd")] - Simd(SimdOperator), } } } -for_each_visit_operator!(define_operator); - -#[cfg(feature = "simd")] -macro_rules! define_simd_operator { - ($(@$proposal:ident $op:ident $({ $($payload:tt)* })? => $visit:ident ($($ann:tt)*))*) => { - /// The subset of Wasm SIMD instructions as defined [here]. 
- /// - /// [here]: https://webassembly.github.io/spec/core/binary/instructions.html - #[derive(Debug, Clone, Eq, PartialEq)] - #[allow(missing_docs)] - pub enum SimdOperator { - $( - $op $({ $($payload)* })?, - )* - } - } -} -#[cfg(feature = "simd")] -for_each_visit_simd_operator!(define_simd_operator); +for_each_operator!(define_operator); /// A reader for a core WebAssembly function's operators. #[derive(Clone)] @@ -439,20 +419,15 @@ pub trait VisitOperator<'a> { /// implement [`VisitOperator`] on their own. fn visit_operator(&mut self, op: &Operator<'a>) -> Self::Output { macro_rules! visit_operator { - ($(@$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { + ($(@$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => {{ match op { - $( - Operator::$op $({ $($arg),* })? => self.$visit($($($arg.clone()),*)?), - )* + $( Operator::$op $({ $($arg),* })? => return self.$visit($($($arg.clone()),*)?), )* #[cfg(feature = "simd")] - Operator::Simd(op) => { - let Some(visitor) = self.simd_visitor() else { - panic!("missing SIMD visitor for: {op:?}") - }; - visitor.visit_simd_operator(op) - } - } - } + _ => {}, + }; + #[cfg(feature = "simd")] + _visit_simd_operator(self, op) + }}; } for_each_visit_operator!(visit_operator) } @@ -488,31 +463,30 @@ pub trait VisitOperator<'a> { for_each_visit_operator!(define_visit_operator); } -/// Trait implemented by types that can visit all [`SimdOperator`] variants. +/// Special handler for visiting `simd` and `relaxed-simd` [`Operator`] variants. #[cfg(feature = "simd")] -#[allow(missing_docs)] -pub trait VisitSimdOperator<'a>: VisitOperator<'a> { - /// Visits the SIMD [`Operator`] `op` using the given `offset`. - /// - /// # Note - /// - /// This is a convenience method that is intended for non-performance - /// critical use cases. 
For performance critical implementations users - /// are recommended to directly use the respective `visit` methods or - /// implement [`VisitOperator`] on their own. - fn visit_simd_operator(&mut self, op: &SimdOperator) -> Self::Output { - macro_rules! visit_simd_operator { - ($(@$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { - match op { - $( - SimdOperator::$op $({ $($arg),* })? => self.$visit($($($arg.clone()),*)?), - )* - } +fn _visit_simd_operator<'a, V>(visitor: &mut V, op: &Operator<'a>) -> V::Output +where + V: VisitOperator<'a> + ?Sized, +{ + macro_rules! visit_simd_operator { + ($(@$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => {{ + let Some(simd_visitor) = visitor.simd_visitor() else { + panic!("missing SIMD visitor to visit operator: {op:?}") + }; + match op { + $( Operator::$op $({ $($arg),* })? => simd_visitor.$visit($($($arg.clone()),*)?), )* + unexpected => unreachable!("unexpected non-SIMD operator: {unexpected:?}"), } - } - for_each_visit_simd_operator!(visit_simd_operator) + }}; } + for_each_visit_simd_operator!(visit_simd_operator) +} +/// Trait implemented by types that can visit all Wasm `simd` and `relaxed-simd` [`Operator`]s. 
+#[cfg(feature = "simd")] +#[allow(missing_docs)] +pub trait VisitSimdOperator<'a>: VisitOperator<'a> { for_each_visit_simd_operator!(define_visit_operator); } @@ -540,9 +514,6 @@ impl<'a, 'b, V: VisitOperator<'a> + ?Sized> VisitOperator<'a> for &'b mut V { #[cfg(feature = "simd")] impl<'a, 'b, V: VisitSimdOperator<'a> + ?Sized> VisitSimdOperator<'a> for &'b mut V { - fn visit_simd_operator(&mut self, op: &SimdOperator) -> Self::Output { - V::visit_simd_operator(*self, op) - } for_each_visit_simd_operator!(define_visit_operator_delegate); } @@ -560,9 +531,6 @@ impl<'a, V: VisitOperator<'a> + ?Sized> VisitOperator<'a> for Box { #[cfg(feature = "simd")] impl<'a, V: VisitSimdOperator<'a> + ?Sized> VisitSimdOperator<'a> for Box { - fn visit_simd_operator(&mut self, op: &SimdOperator) -> Self::Output { - V::visit_simd_operator(&mut *self, op) - } for_each_visit_simd_operator!(define_visit_operator_delegate); } From 941edc97c4a0d330f4a58a1dd08f52d1ca046d51 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 26 Nov 2024 17:28:47 +0100 Subject: [PATCH 64/83] export all internal macro (debug) --- crates/wasmparser/src/lib.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/crates/wasmparser/src/lib.rs b/crates/wasmparser/src/lib.rs index 8638790430..75a2fe6f6e 100644 --- a/crates/wasmparser/src/lib.rs +++ b/crates/wasmparser/src/lib.rs @@ -55,6 +55,7 @@ mod prelude { pub use crate::collections::{IndexMap, Map, Set}; } +#[macro_export] #[doc(hidden)] macro_rules! _for_each_operator_group { ($mac:ident) => { @@ -779,6 +780,7 @@ macro_rules! define_for_each_operator { } )* ) => { + #[macro_export] #[doc(hidden)] macro_rules! _for_each_operator { ($m:ident) => { @@ -834,6 +836,7 @@ macro_rules! define_for_each_non_simd_operator { }; (filter [$($t:tt)*]) => { + #[macro_export] #[doc(hidden)] macro_rules! _for_each_non_simd_operator { ($m:ident) => { @@ -889,6 +892,7 @@ macro_rules! 
define_for_each_simd_operator { }; (filter [$($t:tt)*]) => { + #[macro_export] #[doc(hidden)] macro_rules! _for_each_simd_operator { ($m:ident) => { @@ -901,6 +905,7 @@ macro_rules! define_for_each_simd_operator { _for_each_operator_group!(define_for_each_simd_operator); #[cfg(feature = "simd")] +#[macro_export] #[doc(hidden)] macro_rules! _for_each_operator_delegate { ($mac:ident) => { @@ -909,6 +914,7 @@ macro_rules! _for_each_operator_delegate { } #[cfg(not(feature = "simd"))] +#[macro_export] #[doc(hidden)] macro_rules! _for_each_operator_delegate { ($mac:ident) => { From 616146e1136d7695407c057f72ede03f340e8ea5 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 26 Nov 2024 17:29:10 +0100 Subject: [PATCH 65/83] wasmprinter: use for_each_[visit[_simd]]_operator --- crates/wasmprinter/src/operator.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/wasmprinter/src/operator.rs b/crates/wasmprinter/src/operator.rs index 2c2a4a5371..5b6f728dfc 100644 --- a/crates/wasmprinter/src/operator.rs +++ b/crates/wasmprinter/src/operator.rs @@ -1394,11 +1394,11 @@ impl<'a> VisitOperator<'a> for PrintOperator<'_, '_, '_, '_> { Some(self) } - wasmparser::for_each_operator!(define_visit); + wasmparser::for_each_visit_operator!(define_visit); } impl<'a> VisitSimdOperator<'a> for PrintOperator<'_, '_, '_, '_> { - wasmparser::for_each_simd_operator!(define_visit); + wasmparser::for_each_visit_simd_operator!(define_visit); } pub trait OpPrinter { From 2740a3168abb2db1cf01ef72d6e4875f8c088674 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 26 Nov 2024 20:43:02 +0100 Subject: [PATCH 66/83] wasm-encoder: use simd feature in dependencies --- crates/wasm-encoder/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/wasm-encoder/Cargo.toml b/crates/wasm-encoder/Cargo.toml index 25d3ee53e0..eea73ba858 100644 --- a/crates/wasm-encoder/Cargo.toml +++ b/crates/wasm-encoder/Cargo.toml @@ -24,12 +24,12 @@ leb128 = { 
workspace = true } # Enable this dependency to get a bunch of `From for # wasm_encoder::Foo` impls. -wasmparser = { optional = true, workspace = true } +wasmparser = { optional = true, workspace = true, features = ["simd"] } [dev-dependencies] anyhow = { workspace = true } tempfile = "3.2.0" -wasmparser = { path = "../wasmparser", features = ["simd"] } +wasmparser = { path = "../wasmparser" } wasmprinter = { workspace = true } [features] From d7258c892227ac55b748fb2aa7e184a84dbaed80 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 26 Nov 2024 20:43:54 +0100 Subject: [PATCH 67/83] fix wasm-encoder compilation --- crates/wasm-encoder/src/reencode.rs | 23 ----------------------- 1 file changed, 23 deletions(-) diff --git a/crates/wasm-encoder/src/reencode.rs b/crates/wasm-encoder/src/reencode.rs index 299df44301..7de2f05a06 100644 --- a/crates/wasm-encoder/src/reencode.rs +++ b/crates/wasm-encoder/src/reencode.rs @@ -1636,7 +1636,6 @@ pub mod utils { translate_build!(reencoder $op $($($arg)*)?) } )* - wasmparser::Operator::Simd(simd_arg) => simd_instruction(reencoder, simd_arg)?, unexpected => unreachable!("encountered unexpected Wasm operator: {unexpected:?}"), }) }; @@ -1645,28 +1644,6 @@ pub mod utils { wasmparser::for_each_operator!(translate) } - fn simd_instruction<'a, T: ?Sized + Reencode>( - reencoder: &mut T, - arg: wasmparser::SimdOperator, - ) -> Result, Error> { - macro_rules! translate_simd { - ($( @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { - Ok(match arg { - $( - wasmparser::SimdOperator::$op $({ $($arg),* })? => { - $( - $(let $arg = translate_map!(reencoder $arg $arg);)* - )? - translate_build!(reencoder $op $($($arg)*)?) - } - )* - }) - }; - } - - wasmparser::for_each_simd_operator!(translate_simd) - } - /// Parses the input `section` given from the `wasmparser` crate and adds /// all the code to the `code` section. 
pub fn parse_code_section( From f289121d9638a0c4b2517e62dea8fa2524379570 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 26 Nov 2024 20:49:47 +0100 Subject: [PATCH 68/83] wasmparser: use macros via crate:: namespace --- crates/wasmparser/src/arity.rs | 2 +- crates/wasmparser/src/binary_reader.rs | 4 ++-- .../wasmparser/src/readers/core/operators.rs | 18 +++++++++--------- crates/wasmparser/src/validator/core.rs | 4 ++-- crates/wasmparser/src/validator/operators.rs | 4 ++-- 5 files changed, 16 insertions(+), 16 deletions(-) diff --git a/crates/wasmparser/src/arity.rs b/crates/wasmparser/src/arity.rs index 6109edd9f5..6c6faf0cb1 100644 --- a/crates/wasmparser/src/arity.rs +++ b/crates/wasmparser/src/arity.rs @@ -253,6 +253,6 @@ impl Operator<'_> { } ); } - for_each_operator!(define_arity) + crate::for_each_operator!(define_arity) } } diff --git a/crates/wasmparser/src/binary_reader.rs b/crates/wasmparser/src/binary_reader.rs index da25850c70..c5bcd5a058 100644 --- a/crates/wasmparser/src/binary_reader.rs +++ b/crates/wasmparser/src/binary_reader.rs @@ -1800,12 +1800,12 @@ impl<'a> VisitOperator<'a> for OperatorFactory<'a> { Some(self) } - for_each_visit_operator!(define_visit_operator); + crate::for_each_visit_operator!(define_visit_operator); } #[cfg(feature = "simd")] impl<'a> VisitSimdOperator<'a> for OperatorFactory<'a> { - for_each_visit_simd_operator!(define_visit_operator); + crate::for_each_visit_simd_operator!(define_visit_operator); } /// Iterator returned from [`BinaryReader::read_iter`]. diff --git a/crates/wasmparser/src/readers/core/operators.rs b/crates/wasmparser/src/readers/core/operators.rs index 1db6b8f5f6..48f81cba2d 100644 --- a/crates/wasmparser/src/readers/core/operators.rs +++ b/crates/wasmparser/src/readers/core/operators.rs @@ -228,7 +228,7 @@ macro_rules! define_operator { } } } -for_each_operator!(define_operator); +crate::for_each_operator!(define_operator); /// A reader for a core WebAssembly function's operators. 
#[derive(Clone)] @@ -429,7 +429,7 @@ pub trait VisitOperator<'a> { _visit_simd_operator(self, op) }}; } - for_each_visit_operator!(visit_operator) + crate::for_each_visit_operator!(visit_operator) } /// Returns a mutable reference to a [`VisitSimdOperator`] visitor. @@ -460,7 +460,7 @@ pub trait VisitOperator<'a> { None } - for_each_visit_operator!(define_visit_operator); + crate::for_each_visit_operator!(define_visit_operator); } /// Special handler for visiting `simd` and `relaxed-simd` [`Operator`] variants. @@ -480,14 +480,14 @@ where } }}; } - for_each_visit_simd_operator!(visit_simd_operator) + crate::for_each_visit_simd_operator!(visit_simd_operator) } /// Trait implemented by types that can visit all Wasm `simd` and `relaxed-simd` [`Operator`]s. #[cfg(feature = "simd")] #[allow(missing_docs)] pub trait VisitSimdOperator<'a>: VisitOperator<'a> { - for_each_visit_simd_operator!(define_visit_operator); + crate::for_each_visit_simd_operator!(define_visit_operator); } macro_rules! define_visit_operator_delegate { @@ -509,12 +509,12 @@ impl<'a, 'b, V: VisitOperator<'a> + ?Sized> VisitOperator<'a> for &'b mut V { fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator<'a, Output = V::Output>> { V::simd_visitor(*self) } - for_each_visit_operator!(define_visit_operator_delegate); + crate::for_each_visit_operator!(define_visit_operator_delegate); } #[cfg(feature = "simd")] impl<'a, 'b, V: VisitSimdOperator<'a> + ?Sized> VisitSimdOperator<'a> for &'b mut V { - for_each_visit_simd_operator!(define_visit_operator_delegate); + crate::for_each_visit_simd_operator!(define_visit_operator_delegate); } impl<'a, V: VisitOperator<'a> + ?Sized> VisitOperator<'a> for Box { @@ -526,12 +526,12 @@ impl<'a, V: VisitOperator<'a> + ?Sized> VisitOperator<'a> for Box { fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator<'a, Output = V::Output>> { V::simd_visitor(&mut *self) } - for_each_visit_operator!(define_visit_operator_delegate); + 
crate::for_each_visit_operator!(define_visit_operator_delegate); } #[cfg(feature = "simd")] impl<'a, V: VisitSimdOperator<'a> + ?Sized> VisitSimdOperator<'a> for Box { - for_each_visit_simd_operator!(define_visit_operator_delegate); + crate::for_each_visit_simd_operator!(define_visit_operator_delegate); } /// A `try_table` entries representation. diff --git a/crates/wasmparser/src/validator/core.rs b/crates/wasmparser/src/validator/core.rs index e753602104..f0d1d49b28 100644 --- a/crates/wasmparser/src/validator/core.rs +++ b/crates/wasmparser/src/validator/core.rs @@ -531,12 +531,12 @@ impl ModuleState { Some(self) } - for_each_visit_operator!(define_visit_operator); + crate::for_each_visit_operator!(define_visit_operator); } #[cfg(feature = "simd")] impl<'a> VisitSimdOperator<'a> for VisitConstOperator<'a> { - for_each_visit_simd_operator!(define_visit_operator); + crate::for_each_visit_simd_operator!(define_visit_operator); } } } diff --git a/crates/wasmparser/src/validator/operators.rs b/crates/wasmparser/src/validator/operators.rs index 58525378a7..9a3a8c391f 100644 --- a/crates/wasmparser/src/validator/operators.rs +++ b/crates/wasmparser/src/validator/operators.rs @@ -1760,7 +1760,7 @@ where Some(self) } - for_each_visit_operator!(validate_proposal); + crate::for_each_visit_operator!(validate_proposal); } #[cfg(feature = "simd")] @@ -1768,7 +1768,7 @@ impl<'a, T> VisitSimdOperator<'a> for WasmProposalValidator<'_, '_, T> where T: WasmModuleResources, { - for_each_visit_simd_operator!(validate_proposal); + crate::for_each_visit_simd_operator!(validate_proposal); } #[track_caller] From 49455e679943ca7a1aca0a82c25c429163789a45 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 26 Nov 2024 20:55:41 +0100 Subject: [PATCH 69/83] expose macros via use statements --- crates/wasmparser/src/lib.rs | 41 ++++++------------------------------ 1 file changed, 7 insertions(+), 34 deletions(-) diff --git a/crates/wasmparser/src/lib.rs b/crates/wasmparser/src/lib.rs 
index 75a2fe6f6e..2f24d2453f 100644 --- a/crates/wasmparser/src/lib.rs +++ b/crates/wasmparser/src/lib.rs @@ -55,8 +55,6 @@ mod prelude { pub use crate::collections::{IndexMap, Map, Set}; } -#[macro_export] -#[doc(hidden)] macro_rules! _for_each_operator_group { ($mac:ident) => { $mac! { @@ -904,23 +902,13 @@ macro_rules! define_for_each_simd_operator { #[cfg(feature = "simd")] _for_each_operator_group!(define_for_each_simd_operator); -#[cfg(feature = "simd")] -#[macro_export] +#[cfg(not(feature = "simd"))] #[doc(hidden)] -macro_rules! _for_each_operator_delegate { - ($mac:ident) => { - _for_each_operator! { $mac } - }; -} +pub use _for_each_non_simd_operator as _for_each_operator_delegate; -#[cfg(not(feature = "simd"))] -#[macro_export] +#[cfg(feature = "simd")] #[doc(hidden)] -macro_rules! _for_each_operator_delegate { - ($mac:ident) => { - _for_each_non_simd_operator! { $mac } - }; -} +pub use _for_each_operator as _for_each_operator_delegate; /// Used to implement routines for the `Operator` enum. /// @@ -1078,12 +1066,7 @@ macro_rules! _for_each_operator_delegate { /// // manually define `visit_*` for all MVP operators here /// } /// ``` -#[macro_export] -macro_rules! for_each_operator { - ($mac:ident) => { - _for_each_operator_delegate! { $mac } - }; -} +pub use _for_each_operator_delegate as for_each_operator; /// Used to implement the `VisitOperator` trait. /// @@ -1238,12 +1221,7 @@ macro_rules! for_each_operator { /// // manually define `visit_*` for all MVP operators here /// } /// ``` -#[macro_export] -macro_rules! for_each_visit_operator { - ($mac:ident) => { - _for_each_non_simd_operator! { $mac } - }; -} +pub use _for_each_non_simd_operator as for_each_visit_operator; /// Used to implement the `VisitSimdOperator` trait. /// @@ -1263,12 +1241,7 @@ macro_rules! for_each_visit_operator { /// /// [`VisitSimdOperator`]: crate::VisitSimdOperator #[cfg(feature = "simd")] -#[macro_export] -macro_rules! 
for_each_visit_simd_operator { - ($mac:ident) => { - _for_each_simd_operator! { $mac } - }; -} +pub use _for_each_simd_operator as for_each_visit_simd_operator; macro_rules! format_err { ($offset:expr, $($arg:tt)*) => { From 3d452dcbfc44d6b2b61f2fa46e94e5b4938ccf04 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 26 Nov 2024 20:59:57 +0100 Subject: [PATCH 70/83] use doc(inline) --- crates/wasmparser/src/lib.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/crates/wasmparser/src/lib.rs b/crates/wasmparser/src/lib.rs index 2f24d2453f..2b60b619bc 100644 --- a/crates/wasmparser/src/lib.rs +++ b/crates/wasmparser/src/lib.rs @@ -1066,6 +1066,7 @@ pub use _for_each_operator as _for_each_operator_delegate; /// // manually define `visit_*` for all MVP operators here /// } /// ``` +#[doc(inline)] pub use _for_each_operator_delegate as for_each_operator; /// Used to implement the `VisitOperator` trait. @@ -1221,6 +1222,7 @@ pub use _for_each_operator_delegate as for_each_operator; /// // manually define `visit_*` for all MVP operators here /// } /// ``` +#[doc(inline)] pub use _for_each_non_simd_operator as for_each_visit_operator; /// Used to implement the `VisitSimdOperator` trait. @@ -1241,6 +1243,7 @@ pub use _for_each_non_simd_operator as for_each_visit_operator; /// /// [`VisitSimdOperator`]: crate::VisitSimdOperator #[cfg(feature = "simd")] +#[doc(inline)] pub use _for_each_simd_operator as for_each_visit_simd_operator; macro_rules! 
format_err { From a2a1fe0940ca8b4ee5a658bab56f3ad4c8944fdd Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 26 Nov 2024 21:00:15 +0100 Subject: [PATCH 71/83] use intra-doc links --- crates/wasmparser/src/lib.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/wasmparser/src/lib.rs b/crates/wasmparser/src/lib.rs index 2b60b619bc..8fca9cb9af 100644 --- a/crates/wasmparser/src/lib.rs +++ b/crates/wasmparser/src/lib.rs @@ -910,7 +910,7 @@ pub use _for_each_non_simd_operator as _for_each_operator_delegate; #[doc(hidden)] pub use _for_each_operator as _for_each_operator_delegate; -/// Used to implement routines for the `Operator` enum. +/// Used to implement routines for the [`Operator`] enum. /// /// A helper macro to conveniently iterate over all opcodes recognized by this /// crate. This can be used to work with either the [`Operator`] enumeration or @@ -1069,7 +1069,7 @@ pub use _for_each_operator as _for_each_operator_delegate; #[doc(inline)] pub use _for_each_operator_delegate as for_each_operator; -/// Used to implement the `VisitOperator` trait. +/// Used to implement the [`VisitOperator`] trait. /// /// A helper macro to conveniently iterate over all opcodes recognized by this /// crate. This can be used to work with either the [`Operator`] enumeration or @@ -1225,7 +1225,7 @@ pub use _for_each_operator_delegate as for_each_operator; #[doc(inline)] pub use _for_each_non_simd_operator as for_each_visit_operator; -/// Used to implement the `VisitSimdOperator` trait. +/// Used to implement the [`VisitSimdOperator`] trait. 
/// /// The list of specializable Wasm proposals is as follows: /// From bc19615db11bc838743f9c2c2c1d68a6280c6a04 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 26 Nov 2024 21:47:09 +0100 Subject: [PATCH 72/83] wit-component: fix wasmparser usage --- crates/wit-component/Cargo.toml | 2 +- crates/wit-component/src/gc.rs | 10 +++++++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/crates/wit-component/Cargo.toml b/crates/wit-component/Cargo.toml index 87bb285454..0155909957 100644 --- a/crates/wit-component/Cargo.toml +++ b/crates/wit-component/Cargo.toml @@ -20,7 +20,7 @@ workspace = true all-features = true [dependencies] -wasmparser = { workspace = true, features = ['component-model'] } +wasmparser = { workspace = true, features = ['component-model', 'simd'] } wasm-encoder = { workspace = true, features = ["wasmparser"] } wasm-metadata = { workspace = true } wit-parser = { workspace = true, features = ['decoding', 'serde'] } diff --git a/crates/wit-component/src/gc.rs b/crates/wit-component/src/gc.rs index ff4b5648c4..55d3be513b 100644 --- a/crates/wit-component/src/gc.rs +++ b/crates/wit-component/src/gc.rs @@ -991,7 +991,15 @@ macro_rules! define_visit { impl<'a> VisitOperator<'a> for Module<'a> { type Output = (); - wasmparser::for_each_operator!(define_visit); + fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator<'a, Output = Self::Output>> { + Some(self) + } + + wasmparser::for_each_visit_operator!(define_visit); +} + +impl<'a> VisitSimdOperator<'a> for Module<'a> { + wasmparser::for_each_visit_simd_operator!(define_visit); } /// Helper function to filter `iter` based on the `live` set, yielding an From b7f7d436645f8aa26e78c02b5223bd35612b6fda Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 26 Nov 2024 21:52:50 +0100 Subject: [PATCH 73/83] wasm-mutate: fix wasmparser usage This mostly reverts the changes done in past commits of the PR. 
--- .../src/mutators/modify_const_exprs.rs | 8 +- .../wasm-mutate/src/mutators/peephole/dfg.rs | 969 ++++++++---------- 2 files changed, 448 insertions(+), 529 deletions(-) diff --git a/crates/wasm-mutate/src/mutators/modify_const_exprs.rs b/crates/wasm-mutate/src/mutators/modify_const_exprs.rs index 9500446150..8051e73adb 100644 --- a/crates/wasm-mutate/src/mutators/modify_const_exprs.rs +++ b/crates/wasm-mutate/src/mutators/modify_const_exprs.rs @@ -5,7 +5,7 @@ use crate::{Error, Mutator, ReencodeResult}; use rand::Rng; use wasm_encoder::reencode::{self, Reencode, RoundtripReencoder}; use wasm_encoder::{ElementSection, GlobalSection}; -use wasmparser::{ConstExpr, ElementSectionReader, GlobalSectionReader, SimdOperator}; +use wasmparser::{ConstExpr, ElementSectionReader, GlobalSectionReader}; #[derive(PartialEq, Copy, Clone)] pub enum ConstExpressionMutator { @@ -76,7 +76,7 @@ impl<'cfg, 'wasm> Reencode for InitTranslator<'cfg, 'wasm> { O::RefNull { .. } | O::I32Const { value: 0 | 1 } | O::I64Const { value: 0 | 1 } => true, O::F32Const { value } => value.bits() == 0, O::F64Const { value } => value.bits() == 0, - O::Simd(SimdOperator::V128Const { value }) => value.i128() == 0, + O::V128Const { value } => value.i128() == 0, _ => false, }; if is_simplest { @@ -86,7 +86,7 @@ impl<'cfg, 'wasm> Reencode for InitTranslator<'cfg, 'wasm> { let ty = match op { O::I32Const { .. } => T::I32, O::I64Const { .. } => T::I64, - O::Simd(SimdOperator::V128Const { .. }) => T::V128, + O::V128Const { .. } => T::V128, O::F32Const { .. } => T::F32, O::F64Const { .. } => T::F64, O::RefFunc { .. 
} @@ -131,7 +131,7 @@ impl<'cfg, 'wasm> Reencode for InitTranslator<'cfg, 'wasm> { } else { self.config.rng().gen() }), - T::V128 => CE::v128_const(if let O::Simd(SimdOperator::V128Const { value }) = op { + T::V128 => CE::v128_const(if let O::V128Const { value } = op { self.config.rng().gen_range(0..value.i128() as u128) as i128 } else { self.config.rng().gen() diff --git a/crates/wasm-mutate/src/mutators/peephole/dfg.rs b/crates/wasm-mutate/src/mutators/peephole/dfg.rs index e0a2a3d368..786643141f 100644 --- a/crates/wasm-mutate/src/mutators/peephole/dfg.rs +++ b/crates/wasm-mutate/src/mutators/peephole/dfg.rs @@ -11,7 +11,6 @@ use egg::{Id, Language, RecExpr}; use std::collections::HashMap; use std::ops::Range; use wasmparser::Operator; -use wasmparser::SimdOperator; /// It executes a minimal symbolic evaluation of the stack to detect operands /// location in the code for certain operators @@ -370,10 +369,8 @@ impl<'a> DFGBuilder { let (operator, _) = &operators[idx]; // Check if it is not EOF - use Operator as Op; - use SimdOperator as SimdOp; match operator { - Op::Call { function_index } => { + Operator::Call { function_index } => { let typeinfo = info.get_functype_idx(*function_index); match typeinfo { crate::module::TypeInfo::Func(tpe) => { @@ -403,234 +400,234 @@ impl<'a> DFGBuilder { } } } - Op::LocalGet { local_index } => { + Operator::LocalGet { local_index } => { self.push_node(Lang::LocalGet(*local_index), idx); } - Op::GlobalGet { global_index } => { + Operator::GlobalGet { global_index } => { self.push_node(Lang::GlobalGet(*global_index), idx); } - Op::GlobalSet { global_index } => { + Operator::GlobalSet { global_index } => { let child = self.pop_operand(idx, true); self.empty_node(Lang::GlobalSet(*global_index, Id::from(child)), idx); } - Op::I32Const { value } => { + Operator::I32Const { value } => { self.push_node(Lang::I32(*value), idx); } - Op::I64Const { value } => { + Operator::I64Const { value } => { self.push_node(Lang::I64(*value), idx); } 
- Op::F32Const { value } => { + Operator::F32Const { value } => { self.push_node(Lang::F32((*value).into()), idx); } - Op::F64Const { value } => { + Operator::F64Const { value } => { self.push_node(Lang::F64((*value).into()), idx); } - Op::Simd(SimdOp::V128Const { value }) => { + Operator::V128Const { value } => { self.push_node(Lang::V128(value.i128()), idx); } - Op::LocalSet { local_index } => { + Operator::LocalSet { local_index } => { let val = self.pop_operand(idx, true); self.empty_node(Lang::LocalSet(*local_index, Id::from(val)), idx); } - Op::LocalTee { local_index } => { + Operator::LocalTee { local_index } => { let val = self.pop_operand(idx, true); self.push_node(Lang::LocalTee(*local_index, Id::from(val)), idx); self.new_color(); } - Op::Nop => { + Operator::Nop => { self.empty_node(Lang::Nop, idx); } - Op::I32Store { memarg } => self.store(idx, memarg, Lang::I32Store), - Op::I64Store { memarg } => self.store(idx, memarg, Lang::I64Store), - Op::F32Store { memarg } => self.store(idx, memarg, Lang::F32Store), - Op::F64Store { memarg } => self.store(idx, memarg, Lang::F64Store), - Op::I32Store8 { memarg } => self.store(idx, memarg, Lang::I32Store8), - Op::I32Store16 { memarg } => self.store(idx, memarg, Lang::I32Store16), - Op::I64Store8 { memarg } => self.store(idx, memarg, Lang::I64Store8), - Op::I64Store16 { memarg } => self.store(idx, memarg, Lang::I64Store16), - Op::I64Store32 { memarg } => self.store(idx, memarg, Lang::I64Store32), + Operator::I32Store { memarg } => self.store(idx, memarg, Lang::I32Store), + Operator::I64Store { memarg } => self.store(idx, memarg, Lang::I64Store), + Operator::F32Store { memarg } => self.store(idx, memarg, Lang::F32Store), + Operator::F64Store { memarg } => self.store(idx, memarg, Lang::F64Store), + Operator::I32Store8 { memarg } => self.store(idx, memarg, Lang::I32Store8), + Operator::I32Store16 { memarg } => self.store(idx, memarg, Lang::I32Store16), + Operator::I64Store8 { memarg } => self.store(idx, memarg, 
Lang::I64Store8), + Operator::I64Store16 { memarg } => self.store(idx, memarg, Lang::I64Store16), + Operator::I64Store32 { memarg } => self.store(idx, memarg, Lang::I64Store32), // All memory loads - Op::I32Load { memarg } => self.load(idx, memarg, Lang::I32Load), - Op::I64Load { memarg } => self.load(idx, memarg, Lang::I64Load), - Op::F32Load { memarg } => self.load(idx, memarg, Lang::F32Load), - Op::F64Load { memarg } => self.load(idx, memarg, Lang::F64Load), - Op::I32Load8S { memarg } => self.load(idx, memarg, Lang::I32Load8S), - Op::I32Load8U { memarg } => self.load(idx, memarg, Lang::I32Load8U), - Op::I32Load16S { memarg } => self.load(idx, memarg, Lang::I32Load16S), - Op::I32Load16U { memarg } => self.load(idx, memarg, Lang::I32Load16U), - Op::I64Load8S { memarg } => self.load(idx, memarg, Lang::I64Load8S), - Op::I64Load8U { memarg } => self.load(idx, memarg, Lang::I64Load8U), - Op::I64Load16S { memarg } => self.load(idx, memarg, Lang::I64Load16S), - Op::I64Load16U { memarg } => self.load(idx, memarg, Lang::I64Load16U), - Op::I64Load32S { memarg } => self.load(idx, memarg, Lang::I64Load32S), - Op::I64Load32U { memarg } => self.load(idx, memarg, Lang::I64Load32U), - - Op::I32Eqz => self.unop(idx, Lang::I32Eqz), - Op::I64Eqz => self.unop(idx, Lang::I64Eqz), - - Op::F32Eq => self.binop(idx, Lang::F32Eq), - Op::F32Ne => self.binop(idx, Lang::F32Ne), - Op::F32Lt => self.binop(idx, Lang::F32Lt), - Op::F32Gt => self.binop(idx, Lang::F32Gt), - Op::F32Le => self.binop(idx, Lang::F32Le), - Op::F32Ge => self.binop(idx, Lang::F32Ge), - - Op::F64Eq => self.binop(idx, Lang::F64Eq), - Op::F64Ne => self.binop(idx, Lang::F64Ne), - Op::F64Lt => self.binop(idx, Lang::F64Lt), - Op::F64Gt => self.binop(idx, Lang::F64Gt), - Op::F64Le => self.binop(idx, Lang::F64Le), - Op::F64Ge => self.binop(idx, Lang::F64Ge), - - Op::I32Clz => self.unop(idx, Lang::I32Clz), - Op::I32Ctz => self.unop(idx, Lang::I32Ctz), - Op::I64Clz => self.unop(idx, Lang::I64Clz), - Op::I64Ctz => self.unop(idx, 
Lang::I64Ctz), - - Op::F32Abs => self.unop(idx, Lang::F32Abs), - Op::F32Neg => self.unop(idx, Lang::F32Neg), - Op::F32Ceil => self.unop(idx, Lang::F32Ceil), - Op::F32Floor => self.unop(idx, Lang::F32Floor), - Op::F32Trunc => self.unop(idx, Lang::F32Trunc), - Op::F32Nearest => self.unop(idx, Lang::F32Nearest), - Op::F32Sqrt => self.unop(idx, Lang::F32Sqrt), - Op::F32Add => self.binop(idx, Lang::F32Add), - Op::F32Sub => self.binop(idx, Lang::F32Sub), - Op::F32Mul => self.binop(idx, Lang::F32Mul), - Op::F32Div => self.binop(idx, Lang::F32Div), - Op::F32Min => self.binop(idx, Lang::F32Min), - Op::F32Max => self.binop(idx, Lang::F32Max), - Op::F32Copysign => self.binop(idx, Lang::F32Copysign), - - Op::F64Abs => self.unop(idx, Lang::F64Abs), - Op::F64Neg => self.unop(idx, Lang::F64Neg), - Op::F64Ceil => self.unop(idx, Lang::F64Ceil), - Op::F64Floor => self.unop(idx, Lang::F64Floor), - Op::F64Trunc => self.unop(idx, Lang::F64Trunc), - Op::F64Nearest => self.unop(idx, Lang::F64Nearest), - Op::F64Sqrt => self.unop(idx, Lang::F64Sqrt), - Op::F64Add => self.binop(idx, Lang::F64Add), - Op::F64Sub => self.binop(idx, Lang::F64Sub), - Op::F64Mul => self.binop(idx, Lang::F64Mul), - Op::F64Div => self.binop(idx, Lang::F64Div), - Op::F64Min => self.binop(idx, Lang::F64Min), - Op::F64Max => self.binop(idx, Lang::F64Max), - Op::F64Copysign => self.binop(idx, Lang::F64Copysign), - - Op::I32TruncF32S => self.unop(idx, Lang::I32TruncF32S), - Op::I32TruncF32U => self.unop(idx, Lang::I32TruncF32U), - Op::I32TruncF64S => self.unop(idx, Lang::I32TruncF64S), - Op::I32TruncF64U => self.unop(idx, Lang::I32TruncF64U), - Op::I64TruncF32S => self.unop(idx, Lang::I64TruncF32S), - Op::I64TruncF32U => self.unop(idx, Lang::I64TruncF32U), - Op::I64TruncF64S => self.unop(idx, Lang::I64TruncF64S), - Op::I64TruncF64U => self.unop(idx, Lang::I64TruncF64U), - Op::F32ConvertI32S => self.unop(idx, Lang::F32ConvertI32S), - Op::F32ConvertI32U => self.unop(idx, Lang::F32ConvertI32U), - Op::F32ConvertI64S => 
self.unop(idx, Lang::F32ConvertI64S), - Op::F32ConvertI64U => self.unop(idx, Lang::F32ConvertI64U), - Op::F64ConvertI32S => self.unop(idx, Lang::F64ConvertI32S), - Op::F64ConvertI32U => self.unop(idx, Lang::F64ConvertI32U), - Op::F64ConvertI64S => self.unop(idx, Lang::F64ConvertI64S), - Op::F64ConvertI64U => self.unop(idx, Lang::F64ConvertI64U), - Op::F64PromoteF32 => self.unop(idx, Lang::F64PromoteF32), - Op::F32DemoteF64 => self.unop(idx, Lang::F32DemoteF64), - Op::I32ReinterpretF32 => self.unop(idx, Lang::I32ReinterpretF32), - Op::I64ReinterpretF64 => self.unop(idx, Lang::I64ReinterpretF64), - Op::F32ReinterpretI32 => self.unop(idx, Lang::F32ReinterpretI32), - Op::F64ReinterpretI64 => self.unop(idx, Lang::F64ReinterpretI64), - Op::I32TruncSatF32S => self.unop(idx, Lang::I32TruncSatF32S), - Op::I32TruncSatF32U => self.unop(idx, Lang::I32TruncSatF32U), - Op::I32TruncSatF64S => self.unop(idx, Lang::I32TruncSatF64S), - Op::I32TruncSatF64U => self.unop(idx, Lang::I32TruncSatF64U), - Op::I64TruncSatF32S => self.unop(idx, Lang::I64TruncSatF32S), - Op::I64TruncSatF32U => self.unop(idx, Lang::I64TruncSatF32U), - Op::I64TruncSatF64S => self.unop(idx, Lang::I64TruncSatF64S), - Op::I64TruncSatF64U => self.unop(idx, Lang::I64TruncSatF64U), - - Op::I32Add => self.binop(idx, Lang::I32Add), - Op::I32Sub => self.binop(idx, Lang::I32Sub), - Op::I32Eq => self.binop(idx, Lang::I32Eq), - Op::I32Ne => self.binop(idx, Lang::I32Ne), - Op::I32LtS => self.binop(idx, Lang::I32LtS), - Op::I32LtU => self.binop(idx, Lang::I32LtU), - Op::I32GtS => self.binop(idx, Lang::I32GtS), - Op::I32GtU => self.binop(idx, Lang::I32GtU), - Op::I32LeS => self.binop(idx, Lang::I32LeS), - Op::I32LeU => self.binop(idx, Lang::I32LeU), - Op::I32GeS => self.binop(idx, Lang::I32GeS), - Op::I32GeU => self.binop(idx, Lang::I32GeU), - Op::I32Mul => self.binop(idx, Lang::I32Mul), - Op::I32DivS => self.binop(idx, Lang::I32DivS), - Op::I32DivU => self.binop(idx, Lang::I32DivU), - Op::I32RemS => self.binop(idx, 
Lang::I32RemS), - Op::I32RemU => self.binop(idx, Lang::I32RemU), - Op::I32Shl => self.binop(idx, Lang::I32Shl), - Op::I32ShrS => self.binop(idx, Lang::I32ShrS), - Op::I32ShrU => self.binop(idx, Lang::I32ShrU), - Op::I32Xor => self.binop(idx, Lang::I32Xor), - Op::I32Or => self.binop(idx, Lang::I32Or), - Op::I32And => self.binop(idx, Lang::I32And), - Op::I32Rotl => self.binop(idx, Lang::I32RotL), - Op::I32Rotr => self.binop(idx, Lang::I32RotR), - - Op::I64Add => self.binop(idx, Lang::I64Add), - Op::I64Sub => self.binop(idx, Lang::I64Sub), - Op::I64Eq => self.binop(idx, Lang::I64Eq), - Op::I64Ne => self.binop(idx, Lang::I64Ne), - Op::I64LtS => self.binop(idx, Lang::I64LtS), - Op::I64LtU => self.binop(idx, Lang::I64LtU), - Op::I64GtS => self.binop(idx, Lang::I64GtS), - Op::I64GtU => self.binop(idx, Lang::I64GtU), - Op::I64LeS => self.binop(idx, Lang::I64LeS), - Op::I64LeU => self.binop(idx, Lang::I64LeU), - Op::I64GeS => self.binop(idx, Lang::I64GeS), - Op::I64GeU => self.binop(idx, Lang::I64GeU), - Op::I64Mul => self.binop(idx, Lang::I64Mul), - Op::I64DivS => self.binop(idx, Lang::I64DivS), - Op::I64DivU => self.binop(idx, Lang::I64DivU), - Op::I64RemS => self.binop(idx, Lang::I64RemS), - Op::I64RemU => self.binop(idx, Lang::I64RemU), - Op::I64Shl => self.binop(idx, Lang::I64Shl), - Op::I64ShrS => self.binop(idx, Lang::I64ShrS), - Op::I64ShrU => self.binop(idx, Lang::I64ShrU), - Op::I64Xor => self.binop(idx, Lang::I64Xor), - Op::I64Or => self.binop(idx, Lang::I64Or), - Op::I64And => self.binop(idx, Lang::I64And), - Op::I64Rotl => self.binop(idx, Lang::I64RotL), - Op::I64Rotr => self.binop(idx, Lang::I64RotR), - - Op::Simd(SimdOp::V128Not) => self.unop(idx, Lang::V128Not), - Op::Simd(SimdOp::V128And) => self.binop(idx, Lang::V128And), - Op::Simd(SimdOp::V128AndNot) => self.binop(idx, Lang::V128AndNot), - Op::Simd(SimdOp::V128Or) => self.binop(idx, Lang::V128Or), - Op::Simd(SimdOp::V128Xor) => self.binop(idx, Lang::V128Xor), - Op::Simd(SimdOp::V128AnyTrue) => 
self.unop(idx, Lang::V128AnyTrue), - Op::Simd(SimdOp::V128Bitselect) => self.ternop(idx, Lang::V128Bitselect), - - Op::Drop => { + Operator::I32Load { memarg } => self.load(idx, memarg, Lang::I32Load), + Operator::I64Load { memarg } => self.load(idx, memarg, Lang::I64Load), + Operator::F32Load { memarg } => self.load(idx, memarg, Lang::F32Load), + Operator::F64Load { memarg } => self.load(idx, memarg, Lang::F64Load), + Operator::I32Load8S { memarg } => self.load(idx, memarg, Lang::I32Load8S), + Operator::I32Load8U { memarg } => self.load(idx, memarg, Lang::I32Load8U), + Operator::I32Load16S { memarg } => self.load(idx, memarg, Lang::I32Load16S), + Operator::I32Load16U { memarg } => self.load(idx, memarg, Lang::I32Load16U), + Operator::I64Load8S { memarg } => self.load(idx, memarg, Lang::I64Load8S), + Operator::I64Load8U { memarg } => self.load(idx, memarg, Lang::I64Load8U), + Operator::I64Load16S { memarg } => self.load(idx, memarg, Lang::I64Load16S), + Operator::I64Load16U { memarg } => self.load(idx, memarg, Lang::I64Load16U), + Operator::I64Load32S { memarg } => self.load(idx, memarg, Lang::I64Load32S), + Operator::I64Load32U { memarg } => self.load(idx, memarg, Lang::I64Load32U), + + Operator::I32Eqz => self.unop(idx, Lang::I32Eqz), + Operator::I64Eqz => self.unop(idx, Lang::I64Eqz), + + Operator::F32Eq => self.binop(idx, Lang::F32Eq), + Operator::F32Ne => self.binop(idx, Lang::F32Ne), + Operator::F32Lt => self.binop(idx, Lang::F32Lt), + Operator::F32Gt => self.binop(idx, Lang::F32Gt), + Operator::F32Le => self.binop(idx, Lang::F32Le), + Operator::F32Ge => self.binop(idx, Lang::F32Ge), + + Operator::F64Eq => self.binop(idx, Lang::F64Eq), + Operator::F64Ne => self.binop(idx, Lang::F64Ne), + Operator::F64Lt => self.binop(idx, Lang::F64Lt), + Operator::F64Gt => self.binop(idx, Lang::F64Gt), + Operator::F64Le => self.binop(idx, Lang::F64Le), + Operator::F64Ge => self.binop(idx, Lang::F64Ge), + + Operator::I32Clz => self.unop(idx, Lang::I32Clz), + Operator::I32Ctz 
=> self.unop(idx, Lang::I32Ctz), + Operator::I64Clz => self.unop(idx, Lang::I64Clz), + Operator::I64Ctz => self.unop(idx, Lang::I64Ctz), + + Operator::F32Abs => self.unop(idx, Lang::F32Abs), + Operator::F32Neg => self.unop(idx, Lang::F32Neg), + Operator::F32Ceil => self.unop(idx, Lang::F32Ceil), + Operator::F32Floor => self.unop(idx, Lang::F32Floor), + Operator::F32Trunc => self.unop(idx, Lang::F32Trunc), + Operator::F32Nearest => self.unop(idx, Lang::F32Nearest), + Operator::F32Sqrt => self.unop(idx, Lang::F32Sqrt), + Operator::F32Add => self.binop(idx, Lang::F32Add), + Operator::F32Sub => self.binop(idx, Lang::F32Sub), + Operator::F32Mul => self.binop(idx, Lang::F32Mul), + Operator::F32Div => self.binop(idx, Lang::F32Div), + Operator::F32Min => self.binop(idx, Lang::F32Min), + Operator::F32Max => self.binop(idx, Lang::F32Max), + Operator::F32Copysign => self.binop(idx, Lang::F32Copysign), + + Operator::F64Abs => self.unop(idx, Lang::F64Abs), + Operator::F64Neg => self.unop(idx, Lang::F64Neg), + Operator::F64Ceil => self.unop(idx, Lang::F64Ceil), + Operator::F64Floor => self.unop(idx, Lang::F64Floor), + Operator::F64Trunc => self.unop(idx, Lang::F64Trunc), + Operator::F64Nearest => self.unop(idx, Lang::F64Nearest), + Operator::F64Sqrt => self.unop(idx, Lang::F64Sqrt), + Operator::F64Add => self.binop(idx, Lang::F64Add), + Operator::F64Sub => self.binop(idx, Lang::F64Sub), + Operator::F64Mul => self.binop(idx, Lang::F64Mul), + Operator::F64Div => self.binop(idx, Lang::F64Div), + Operator::F64Min => self.binop(idx, Lang::F64Min), + Operator::F64Max => self.binop(idx, Lang::F64Max), + Operator::F64Copysign => self.binop(idx, Lang::F64Copysign), + + Operator::I32TruncF32S => self.unop(idx, Lang::I32TruncF32S), + Operator::I32TruncF32U => self.unop(idx, Lang::I32TruncF32U), + Operator::I32TruncF64S => self.unop(idx, Lang::I32TruncF64S), + Operator::I32TruncF64U => self.unop(idx, Lang::I32TruncF64U), + Operator::I64TruncF32S => self.unop(idx, Lang::I64TruncF32S), + 
Operator::I64TruncF32U => self.unop(idx, Lang::I64TruncF32U), + Operator::I64TruncF64S => self.unop(idx, Lang::I64TruncF64S), + Operator::I64TruncF64U => self.unop(idx, Lang::I64TruncF64U), + Operator::F32ConvertI32S => self.unop(idx, Lang::F32ConvertI32S), + Operator::F32ConvertI32U => self.unop(idx, Lang::F32ConvertI32U), + Operator::F32ConvertI64S => self.unop(idx, Lang::F32ConvertI64S), + Operator::F32ConvertI64U => self.unop(idx, Lang::F32ConvertI64U), + Operator::F64ConvertI32S => self.unop(idx, Lang::F64ConvertI32S), + Operator::F64ConvertI32U => self.unop(idx, Lang::F64ConvertI32U), + Operator::F64ConvertI64S => self.unop(idx, Lang::F64ConvertI64S), + Operator::F64ConvertI64U => self.unop(idx, Lang::F64ConvertI64U), + Operator::F64PromoteF32 => self.unop(idx, Lang::F64PromoteF32), + Operator::F32DemoteF64 => self.unop(idx, Lang::F32DemoteF64), + Operator::I32ReinterpretF32 => self.unop(idx, Lang::I32ReinterpretF32), + Operator::I64ReinterpretF64 => self.unop(idx, Lang::I64ReinterpretF64), + Operator::F32ReinterpretI32 => self.unop(idx, Lang::F32ReinterpretI32), + Operator::F64ReinterpretI64 => self.unop(idx, Lang::F64ReinterpretI64), + Operator::I32TruncSatF32S => self.unop(idx, Lang::I32TruncSatF32S), + Operator::I32TruncSatF32U => self.unop(idx, Lang::I32TruncSatF32U), + Operator::I32TruncSatF64S => self.unop(idx, Lang::I32TruncSatF64S), + Operator::I32TruncSatF64U => self.unop(idx, Lang::I32TruncSatF64U), + Operator::I64TruncSatF32S => self.unop(idx, Lang::I64TruncSatF32S), + Operator::I64TruncSatF32U => self.unop(idx, Lang::I64TruncSatF32U), + Operator::I64TruncSatF64S => self.unop(idx, Lang::I64TruncSatF64S), + Operator::I64TruncSatF64U => self.unop(idx, Lang::I64TruncSatF64U), + + Operator::I32Add => self.binop(idx, Lang::I32Add), + Operator::I32Sub => self.binop(idx, Lang::I32Sub), + Operator::I32Eq => self.binop(idx, Lang::I32Eq), + Operator::I32Ne => self.binop(idx, Lang::I32Ne), + Operator::I32LtS => self.binop(idx, Lang::I32LtS), + 
Operator::I32LtU => self.binop(idx, Lang::I32LtU), + Operator::I32GtS => self.binop(idx, Lang::I32GtS), + Operator::I32GtU => self.binop(idx, Lang::I32GtU), + Operator::I32LeS => self.binop(idx, Lang::I32LeS), + Operator::I32LeU => self.binop(idx, Lang::I32LeU), + Operator::I32GeS => self.binop(idx, Lang::I32GeS), + Operator::I32GeU => self.binop(idx, Lang::I32GeU), + Operator::I32Mul => self.binop(idx, Lang::I32Mul), + Operator::I32DivS => self.binop(idx, Lang::I32DivS), + Operator::I32DivU => self.binop(idx, Lang::I32DivU), + Operator::I32RemS => self.binop(idx, Lang::I32RemS), + Operator::I32RemU => self.binop(idx, Lang::I32RemU), + Operator::I32Shl => self.binop(idx, Lang::I32Shl), + Operator::I32ShrS => self.binop(idx, Lang::I32ShrS), + Operator::I32ShrU => self.binop(idx, Lang::I32ShrU), + Operator::I32Xor => self.binop(idx, Lang::I32Xor), + Operator::I32Or => self.binop(idx, Lang::I32Or), + Operator::I32And => self.binop(idx, Lang::I32And), + Operator::I32Rotl => self.binop(idx, Lang::I32RotL), + Operator::I32Rotr => self.binop(idx, Lang::I32RotR), + + Operator::I64Add => self.binop(idx, Lang::I64Add), + Operator::I64Sub => self.binop(idx, Lang::I64Sub), + Operator::I64Eq => self.binop(idx, Lang::I64Eq), + Operator::I64Ne => self.binop(idx, Lang::I64Ne), + Operator::I64LtS => self.binop(idx, Lang::I64LtS), + Operator::I64LtU => self.binop(idx, Lang::I64LtU), + Operator::I64GtS => self.binop(idx, Lang::I64GtS), + Operator::I64GtU => self.binop(idx, Lang::I64GtU), + Operator::I64LeS => self.binop(idx, Lang::I64LeS), + Operator::I64LeU => self.binop(idx, Lang::I64LeU), + Operator::I64GeS => self.binop(idx, Lang::I64GeS), + Operator::I64GeU => self.binop(idx, Lang::I64GeU), + Operator::I64Mul => self.binop(idx, Lang::I64Mul), + Operator::I64DivS => self.binop(idx, Lang::I64DivS), + Operator::I64DivU => self.binop(idx, Lang::I64DivU), + Operator::I64RemS => self.binop(idx, Lang::I64RemS), + Operator::I64RemU => self.binop(idx, Lang::I64RemU), + Operator::I64Shl 
=> self.binop(idx, Lang::I64Shl), + Operator::I64ShrS => self.binop(idx, Lang::I64ShrS), + Operator::I64ShrU => self.binop(idx, Lang::I64ShrU), + Operator::I64Xor => self.binop(idx, Lang::I64Xor), + Operator::I64Or => self.binop(idx, Lang::I64Or), + Operator::I64And => self.binop(idx, Lang::I64And), + Operator::I64Rotl => self.binop(idx, Lang::I64RotL), + Operator::I64Rotr => self.binop(idx, Lang::I64RotR), + + Operator::V128Not => self.unop(idx, Lang::V128Not), + Operator::V128And => self.binop(idx, Lang::V128And), + Operator::V128AndNot => self.binop(idx, Lang::V128AndNot), + Operator::V128Or => self.binop(idx, Lang::V128Or), + Operator::V128Xor => self.binop(idx, Lang::V128Xor), + Operator::V128AnyTrue => self.unop(idx, Lang::V128AnyTrue), + Operator::V128Bitselect => self.ternop(idx, Lang::V128Bitselect), + + Operator::Drop => { let arg = self.pop_operand(idx, false); self.empty_node(Lang::Drop([Id::from(arg)]), idx); } // conversion between integers - Op::I32WrapI64 => self.unop(idx, Lang::Wrap), - Op::I32Extend8S => self.unop(idx, Lang::I32Extend8S), - Op::I32Extend16S => self.unop(idx, Lang::I32Extend16S), + Operator::I32WrapI64 => self.unop(idx, Lang::Wrap), + Operator::I32Extend8S => self.unop(idx, Lang::I32Extend8S), + Operator::I32Extend16S => self.unop(idx, Lang::I32Extend16S), - Op::I64Extend8S => self.unop(idx, Lang::I64Extend8S), - Op::I64Extend16S => self.unop(idx, Lang::I64Extend16S), - Op::I64Extend32S => self.unop(idx, Lang::I64Extend32S), - Op::I64ExtendI32S => self.unop(idx, Lang::I64ExtendI32S), - Op::I64ExtendI32U => self.unop(idx, Lang::I64ExtendI32U), + Operator::I64Extend8S => self.unop(idx, Lang::I64Extend8S), + Operator::I64Extend16S => self.unop(idx, Lang::I64Extend16S), + Operator::I64Extend32S => self.unop(idx, Lang::I64Extend32S), + Operator::I64ExtendI32S => self.unop(idx, Lang::I64ExtendI32S), + Operator::I64ExtendI32U => self.unop(idx, Lang::I64ExtendI32U), - Op::I32Popcnt => self.unop(idx, Lang::I32Popcnt), - Op::I64Popcnt => 
self.unop(idx, Lang::I64Popcnt), + Operator::I32Popcnt => self.unop(idx, Lang::I32Popcnt), + Operator::I64Popcnt => self.unop(idx, Lang::I64Popcnt), - Op::Select => { + Operator::Select => { let condition = self.pop_operand(idx, false); let alternative = self.pop_operand(idx, false); let consequent = self.pop_operand(idx, false); @@ -643,14 +640,14 @@ impl<'a> DFGBuilder { idx, ); } - Op::MemoryGrow { mem } => { + Operator::MemoryGrow { mem } => { let arg = self.pop_operand(idx, false); self.push_node(Lang::MemoryGrow(*mem, Id::from(arg)), idx); } - Op::MemorySize { mem } => { + Operator::MemorySize { mem } => { self.push_node(Lang::MemorySize(*mem), idx); } - Op::TableGrow { table } => { + Operator::TableGrow { table } => { let elem = self.pop_operand(idx, false); let size = self.pop_operand(idx, false); self.push_node( @@ -658,19 +655,19 @@ impl<'a> DFGBuilder { idx, ); } - Op::TableSize { table } => { + Operator::TableSize { table } => { self.push_node(Lang::TableSize(*table), idx); } - Op::DataDrop { data_index } => { + Operator::DataDrop { data_index } => { self.empty_node(Lang::DataDrop(*data_index), idx); } - Op::ElemDrop { elem_index } => { + Operator::ElemDrop { elem_index } => { self.empty_node(Lang::ElemDrop(*elem_index), idx); } - Op::MemoryInit { mem, data_index } => { + Operator::MemoryInit { mem, data_index } => { let a = Id::from(self.pop_operand(idx, false)); let b = Id::from(self.pop_operand(idx, false)); let c = Id::from(self.pop_operand(idx, false)); @@ -685,7 +682,7 @@ impl<'a> DFGBuilder { idx, ); } - Op::MemoryCopy { src_mem, dst_mem } => { + Operator::MemoryCopy { src_mem, dst_mem } => { let a = Id::from(self.pop_operand(idx, false)); let b = Id::from(self.pop_operand(idx, false)); let c = Id::from(self.pop_operand(idx, false)); @@ -701,14 +698,14 @@ impl<'a> DFGBuilder { ); } - Op::MemoryFill { mem } => { + Operator::MemoryFill { mem } => { let a = Id::from(self.pop_operand(idx, false)); let b = Id::from(self.pop_operand(idx, false)); let c 
= Id::from(self.pop_operand(idx, false)); self.empty_node(Lang::MemoryFill(*mem, [c, b, a]), idx); } - Op::TableInit { table, elem_index } => { + Operator::TableInit { table, elem_index } => { let a = Id::from(self.pop_operand(idx, false)); let b = Id::from(self.pop_operand(idx, false)); let c = Id::from(self.pop_operand(idx, false)); @@ -723,7 +720,7 @@ impl<'a> DFGBuilder { idx, ); } - Op::TableCopy { + Operator::TableCopy { src_table, dst_table, } => { @@ -742,420 +739,342 @@ impl<'a> DFGBuilder { ); } - Op::TableFill { table } => { + Operator::TableFill { table } => { let a = Id::from(self.pop_operand(idx, false)); let b = Id::from(self.pop_operand(idx, false)); let c = Id::from(self.pop_operand(idx, false)); self.empty_node(Lang::TableFill(*table, [c, b, a]), idx); } - Op::TableGet { table } => { + Operator::TableGet { table } => { let arg = Id::from(self.pop_operand(idx, false)); self.push_node(Lang::TableGet(*table, arg), idx); } - Op::TableSet { table } => { + Operator::TableSet { table } => { let arg1 = Id::from(self.pop_operand(idx, false)); let arg2 = Id::from(self.pop_operand(idx, false)); self.empty_node(Lang::TableSet(*table, [arg2, arg1]), idx); } - Op::RefNull { + Operator::RefNull { hty: wasmparser::HeapType::EXTERN, } => { self.push_node(Lang::RefNull(RefType::Extern), idx); } - Op::RefNull { + Operator::RefNull { hty: wasmparser::HeapType::FUNC, } => { self.push_node(Lang::RefNull(RefType::Func), idx); } - Op::RefFunc { function_index } => { + Operator::RefFunc { function_index } => { self.push_node(Lang::RefFunc(*function_index), idx); } - Op::RefIsNull => { + Operator::RefIsNull => { let arg = Id::from(self.pop_operand(idx, false)); self.push_node(Lang::RefIsNull(arg), idx); } - Op::Simd(SimdOp::V128Load { memarg }) => self.load(idx, memarg, Lang::V128Load), - Op::Simd(SimdOp::V128Load8x8S { memarg }) => { - self.load(idx, memarg, Lang::V128Load8x8S) - } - Op::Simd(SimdOp::V128Load8x8U { memarg }) => { - self.load(idx, memarg, 
Lang::V128Load8x8U) - } - Op::Simd(SimdOp::V128Load16x4S { memarg }) => { - self.load(idx, memarg, Lang::V128Load16x4S) - } - Op::Simd(SimdOp::V128Load16x4U { memarg }) => { - self.load(idx, memarg, Lang::V128Load16x4U) - } - Op::Simd(SimdOp::V128Load32x2S { memarg }) => { - self.load(idx, memarg, Lang::V128Load32x2S) - } - Op::Simd(SimdOp::V128Load32x2U { memarg }) => { - self.load(idx, memarg, Lang::V128Load32x2U) - } - Op::Simd(SimdOp::V128Load8Splat { memarg }) => { - self.load(idx, memarg, Lang::V128Load8Splat) - } - Op::Simd(SimdOp::V128Load16Splat { memarg }) => { + Operator::V128Load { memarg } => self.load(idx, memarg, Lang::V128Load), + Operator::V128Load8x8S { memarg } => self.load(idx, memarg, Lang::V128Load8x8S), + Operator::V128Load8x8U { memarg } => self.load(idx, memarg, Lang::V128Load8x8U), + Operator::V128Load16x4S { memarg } => self.load(idx, memarg, Lang::V128Load16x4S), + Operator::V128Load16x4U { memarg } => self.load(idx, memarg, Lang::V128Load16x4U), + Operator::V128Load32x2S { memarg } => self.load(idx, memarg, Lang::V128Load32x2S), + Operator::V128Load32x2U { memarg } => self.load(idx, memarg, Lang::V128Load32x2U), + Operator::V128Load8Splat { memarg } => self.load(idx, memarg, Lang::V128Load8Splat), + Operator::V128Load16Splat { memarg } => { self.load(idx, memarg, Lang::V128Load16Splat) } - Op::Simd(SimdOp::V128Load32Splat { memarg }) => { + Operator::V128Load32Splat { memarg } => { self.load(idx, memarg, Lang::V128Load32Splat) } - Op::Simd(SimdOp::V128Load64Splat { memarg }) => { + Operator::V128Load64Splat { memarg } => { self.load(idx, memarg, Lang::V128Load64Splat) } - Op::Simd(SimdOp::V128Load32Zero { memarg }) => { - self.load(idx, memarg, Lang::V128Load32Zero) - } - Op::Simd(SimdOp::V128Load64Zero { memarg }) => { - self.load(idx, memarg, Lang::V128Load64Zero) - } - Op::Simd(SimdOp::V128Store { memarg }) => self.store(idx, memarg, Lang::V128Store), - Op::Simd(SimdOp::V128Load8Lane { memarg, lane }) => { + Operator::V128Load32Zero 
{ memarg } => self.load(idx, memarg, Lang::V128Load32Zero), + Operator::V128Load64Zero { memarg } => self.load(idx, memarg, Lang::V128Load64Zero), + Operator::V128Store { memarg } => self.store(idx, memarg, Lang::V128Store), + Operator::V128Load8Lane { memarg, lane } => { self.load_lane(idx, memarg, lane, Lang::V128Load8Lane) } - Op::Simd(SimdOp::V128Load16Lane { memarg, lane }) => { + Operator::V128Load16Lane { memarg, lane } => { self.load_lane(idx, memarg, lane, Lang::V128Load16Lane) } - Op::Simd(SimdOp::V128Load32Lane { memarg, lane }) => { + Operator::V128Load32Lane { memarg, lane } => { self.load_lane(idx, memarg, lane, Lang::V128Load32Lane) } - Op::Simd(SimdOp::V128Load64Lane { memarg, lane }) => { + Operator::V128Load64Lane { memarg, lane } => { self.load_lane(idx, memarg, lane, Lang::V128Load64Lane) } - Op::Simd(SimdOp::V128Store8Lane { memarg, lane }) => { + Operator::V128Store8Lane { memarg, lane } => { self.store_lane(idx, memarg, lane, Lang::V128Store8Lane) } - Op::Simd(SimdOp::V128Store16Lane { memarg, lane }) => { + Operator::V128Store16Lane { memarg, lane } => { self.store_lane(idx, memarg, lane, Lang::V128Store16Lane) } - Op::Simd(SimdOp::V128Store32Lane { memarg, lane }) => { + Operator::V128Store32Lane { memarg, lane } => { self.store_lane(idx, memarg, lane, Lang::V128Store32Lane) } - Op::Simd(SimdOp::V128Store64Lane { memarg, lane }) => { + Operator::V128Store64Lane { memarg, lane } => { self.store_lane(idx, memarg, lane, Lang::V128Store64Lane) } - Op::Simd(SimdOp::I8x16ExtractLaneS { lane }) => { + Operator::I8x16ExtractLaneS { lane } => { self.extract_lane(idx, lane, Lang::I8x16ExtractLaneS) } - Op::Simd(SimdOp::I8x16ExtractLaneU { lane }) => { + Operator::I8x16ExtractLaneU { lane } => { self.extract_lane(idx, lane, Lang::I8x16ExtractLaneU) } - Op::Simd(SimdOp::I8x16ReplaceLane { lane }) => { + Operator::I8x16ReplaceLane { lane } => { self.replace_lane(idx, lane, Lang::I8x16ReplaceLane) } - Op::Simd(SimdOp::I16x8ExtractLaneS { lane }) => { + 
Operator::I16x8ExtractLaneS { lane } => { self.extract_lane(idx, lane, Lang::I16x8ExtractLaneS) } - Op::Simd(SimdOp::I16x8ExtractLaneU { lane }) => { + Operator::I16x8ExtractLaneU { lane } => { self.extract_lane(idx, lane, Lang::I16x8ExtractLaneU) } - Op::Simd(SimdOp::I16x8ReplaceLane { lane }) => { + Operator::I16x8ReplaceLane { lane } => { self.replace_lane(idx, lane, Lang::I16x8ReplaceLane) } - Op::Simd(SimdOp::I32x4ExtractLane { lane }) => { + Operator::I32x4ExtractLane { lane } => { self.extract_lane(idx, lane, Lang::I32x4ExtractLane) } - Op::Simd(SimdOp::I32x4ReplaceLane { lane }) => { + Operator::I32x4ReplaceLane { lane } => { self.replace_lane(idx, lane, Lang::I32x4ReplaceLane) } - Op::Simd(SimdOp::I64x2ExtractLane { lane }) => { + Operator::I64x2ExtractLane { lane } => { self.extract_lane(idx, lane, Lang::I64x2ExtractLane) } - Op::Simd(SimdOp::I64x2ReplaceLane { lane }) => { + Operator::I64x2ReplaceLane { lane } => { self.replace_lane(idx, lane, Lang::I64x2ReplaceLane) } - Op::Simd(SimdOp::F32x4ExtractLane { lane }) => { + Operator::F32x4ExtractLane { lane } => { self.extract_lane(idx, lane, Lang::F32x4ExtractLane) } - Op::Simd(SimdOp::F32x4ReplaceLane { lane }) => { + Operator::F32x4ReplaceLane { lane } => { self.replace_lane(idx, lane, Lang::F32x4ReplaceLane) } - Op::Simd(SimdOp::F64x2ExtractLane { lane }) => { + Operator::F64x2ExtractLane { lane } => { self.extract_lane(idx, lane, Lang::F64x2ExtractLane) } - Op::Simd(SimdOp::F64x2ReplaceLane { lane }) => { + Operator::F64x2ReplaceLane { lane } => { self.replace_lane(idx, lane, Lang::F64x2ReplaceLane) } - Op::Simd(SimdOp::I8x16Swizzle) => self.binop(idx, Lang::I8x16Swizzle), - Op::Simd(SimdOp::I8x16Shuffle { lanes }) => { + Operator::I8x16Swizzle => self.binop(idx, Lang::I8x16Swizzle), + Operator::I8x16Shuffle { lanes } => { let a = Id::from(self.pop_operand(idx, false)); let b = Id::from(self.pop_operand(idx, false)); self.push_node(Lang::I8x16Shuffle(Shuffle { indices: *lanes }, [b, a]), idx); } - 
Op::Simd(SimdOp::I8x16Splat) => self.unop(idx, Lang::I8x16Splat), - Op::Simd(SimdOp::I16x8Splat) => self.unop(idx, Lang::I16x8Splat), - Op::Simd(SimdOp::I32x4Splat) => self.unop(idx, Lang::I32x4Splat), - Op::Simd(SimdOp::I64x2Splat) => self.unop(idx, Lang::I64x2Splat), - Op::Simd(SimdOp::F32x4Splat) => self.unop(idx, Lang::F32x4Splat), - Op::Simd(SimdOp::F64x2Splat) => self.unop(idx, Lang::F64x2Splat), - - Op::Simd(SimdOp::I8x16Eq) => self.binop(idx, Lang::I8x16Eq), - Op::Simd(SimdOp::I8x16Ne) => self.binop(idx, Lang::I8x16Ne), - Op::Simd(SimdOp::I8x16LtS) => self.binop(idx, Lang::I8x16LtS), - Op::Simd(SimdOp::I8x16LtU) => self.binop(idx, Lang::I8x16LtU), - Op::Simd(SimdOp::I8x16GtS) => self.binop(idx, Lang::I8x16GtS), - Op::Simd(SimdOp::I8x16GtU) => self.binop(idx, Lang::I8x16GtU), - Op::Simd(SimdOp::I8x16LeS) => self.binop(idx, Lang::I8x16LeS), - Op::Simd(SimdOp::I8x16LeU) => self.binop(idx, Lang::I8x16LeU), - Op::Simd(SimdOp::I8x16GeS) => self.binop(idx, Lang::I8x16GeS), - Op::Simd(SimdOp::I8x16GeU) => self.binop(idx, Lang::I8x16GeU), - Op::Simd(SimdOp::I16x8Eq) => self.binop(idx, Lang::I16x8Eq), - Op::Simd(SimdOp::I16x8Ne) => self.binop(idx, Lang::I16x8Ne), - Op::Simd(SimdOp::I16x8LtS) => self.binop(idx, Lang::I16x8LtS), - Op::Simd(SimdOp::I16x8LtU) => self.binop(idx, Lang::I16x8LtU), - Op::Simd(SimdOp::I16x8GtS) => self.binop(idx, Lang::I16x8GtS), - Op::Simd(SimdOp::I16x8GtU) => self.binop(idx, Lang::I16x8GtU), - Op::Simd(SimdOp::I16x8LeS) => self.binop(idx, Lang::I16x8LeS), - Op::Simd(SimdOp::I16x8LeU) => self.binop(idx, Lang::I16x8LeU), - Op::Simd(SimdOp::I16x8GeS) => self.binop(idx, Lang::I16x8GeS), - Op::Simd(SimdOp::I16x8GeU) => self.binop(idx, Lang::I16x8GeU), - Op::Simd(SimdOp::I32x4Eq) => self.binop(idx, Lang::I32x4Eq), - Op::Simd(SimdOp::I32x4Ne) => self.binop(idx, Lang::I32x4Ne), - Op::Simd(SimdOp::I32x4LtS) => self.binop(idx, Lang::I32x4LtS), - Op::Simd(SimdOp::I32x4LtU) => self.binop(idx, Lang::I32x4LtU), - Op::Simd(SimdOp::I32x4GtS) => 
self.binop(idx, Lang::I32x4GtS), - Op::Simd(SimdOp::I32x4GtU) => self.binop(idx, Lang::I32x4GtU), - Op::Simd(SimdOp::I32x4LeS) => self.binop(idx, Lang::I32x4LeS), - Op::Simd(SimdOp::I32x4LeU) => self.binop(idx, Lang::I32x4LeU), - Op::Simd(SimdOp::I32x4GeS) => self.binop(idx, Lang::I32x4GeS), - Op::Simd(SimdOp::I32x4GeU) => self.binop(idx, Lang::I32x4GeU), - Op::Simd(SimdOp::I64x2Eq) => self.binop(idx, Lang::I64x2Eq), - Op::Simd(SimdOp::I64x2Ne) => self.binop(idx, Lang::I64x2Ne), - Op::Simd(SimdOp::I64x2LtS) => self.binop(idx, Lang::I64x2LtS), - Op::Simd(SimdOp::I64x2GtS) => self.binop(idx, Lang::I64x2GtS), - Op::Simd(SimdOp::I64x2LeS) => self.binop(idx, Lang::I64x2LeS), - Op::Simd(SimdOp::I64x2GeS) => self.binop(idx, Lang::I64x2GeS), - Op::Simd(SimdOp::F32x4Eq) => self.binop(idx, Lang::F32x4Eq), - Op::Simd(SimdOp::F32x4Ne) => self.binop(idx, Lang::F32x4Ne), - Op::Simd(SimdOp::F32x4Lt) => self.binop(idx, Lang::F32x4Lt), - Op::Simd(SimdOp::F32x4Gt) => self.binop(idx, Lang::F32x4Gt), - Op::Simd(SimdOp::F32x4Le) => self.binop(idx, Lang::F32x4Le), - Op::Simd(SimdOp::F32x4Ge) => self.binop(idx, Lang::F32x4Ge), - Op::Simd(SimdOp::F64x2Eq) => self.binop(idx, Lang::F64x2Eq), - Op::Simd(SimdOp::F64x2Ne) => self.binop(idx, Lang::F64x2Ne), - Op::Simd(SimdOp::F64x2Lt) => self.binop(idx, Lang::F64x2Lt), - Op::Simd(SimdOp::F64x2Gt) => self.binop(idx, Lang::F64x2Gt), - Op::Simd(SimdOp::F64x2Le) => self.binop(idx, Lang::F64x2Le), - Op::Simd(SimdOp::F64x2Ge) => self.binop(idx, Lang::F64x2Ge), - - Op::Simd(SimdOp::I8x16Abs) => self.unop(idx, Lang::I8x16Abs), - Op::Simd(SimdOp::I8x16Neg) => self.unop(idx, Lang::I8x16Neg), - Op::Simd(SimdOp::I8x16Popcnt) => self.unop(idx, Lang::I8x16Popcnt), - Op::Simd(SimdOp::I8x16AllTrue) => self.unop(idx, Lang::I8x16AllTrue), - Op::Simd(SimdOp::I8x16Bitmask) => self.unop(idx, Lang::I8x16Bitmask), - Op::Simd(SimdOp::I8x16NarrowI16x8S) => self.binop(idx, Lang::I8x16NarrowI16x8S), - Op::Simd(SimdOp::I8x16NarrowI16x8U) => self.binop(idx, 
Lang::I8x16NarrowI16x8U), - Op::Simd(SimdOp::I8x16Shl) => self.binop(idx, Lang::I8x16Shl), - Op::Simd(SimdOp::I8x16ShrS) => self.binop(idx, Lang::I8x16ShrS), - Op::Simd(SimdOp::I8x16ShrU) => self.binop(idx, Lang::I8x16ShrU), - Op::Simd(SimdOp::I8x16Add) => self.binop(idx, Lang::I8x16Add), - Op::Simd(SimdOp::I8x16AddSatS) => self.binop(idx, Lang::I8x16AddSatS), - Op::Simd(SimdOp::I8x16AddSatU) => self.binop(idx, Lang::I8x16AddSatU), - Op::Simd(SimdOp::I8x16Sub) => self.binop(idx, Lang::I8x16Sub), - Op::Simd(SimdOp::I8x16SubSatS) => self.binop(idx, Lang::I8x16SubSatS), - Op::Simd(SimdOp::I8x16SubSatU) => self.binop(idx, Lang::I8x16SubSatU), - Op::Simd(SimdOp::I8x16MinS) => self.binop(idx, Lang::I8x16MinS), - Op::Simd(SimdOp::I8x16MinU) => self.binop(idx, Lang::I8x16MinU), - Op::Simd(SimdOp::I8x16MaxS) => self.binop(idx, Lang::I8x16MaxS), - Op::Simd(SimdOp::I8x16MaxU) => self.binop(idx, Lang::I8x16MaxU), - Op::Simd(SimdOp::I8x16AvgrU) => self.binop(idx, Lang::I8x16AvgrU), - - Op::Simd(SimdOp::I16x8ExtAddPairwiseI8x16S) => { + Operator::I8x16Splat => self.unop(idx, Lang::I8x16Splat), + Operator::I16x8Splat => self.unop(idx, Lang::I16x8Splat), + Operator::I32x4Splat => self.unop(idx, Lang::I32x4Splat), + Operator::I64x2Splat => self.unop(idx, Lang::I64x2Splat), + Operator::F32x4Splat => self.unop(idx, Lang::F32x4Splat), + Operator::F64x2Splat => self.unop(idx, Lang::F64x2Splat), + + Operator::I8x16Eq => self.binop(idx, Lang::I8x16Eq), + Operator::I8x16Ne => self.binop(idx, Lang::I8x16Ne), + Operator::I8x16LtS => self.binop(idx, Lang::I8x16LtS), + Operator::I8x16LtU => self.binop(idx, Lang::I8x16LtU), + Operator::I8x16GtS => self.binop(idx, Lang::I8x16GtS), + Operator::I8x16GtU => self.binop(idx, Lang::I8x16GtU), + Operator::I8x16LeS => self.binop(idx, Lang::I8x16LeS), + Operator::I8x16LeU => self.binop(idx, Lang::I8x16LeU), + Operator::I8x16GeS => self.binop(idx, Lang::I8x16GeS), + Operator::I8x16GeU => self.binop(idx, Lang::I8x16GeU), + Operator::I16x8Eq => 
self.binop(idx, Lang::I16x8Eq), + Operator::I16x8Ne => self.binop(idx, Lang::I16x8Ne), + Operator::I16x8LtS => self.binop(idx, Lang::I16x8LtS), + Operator::I16x8LtU => self.binop(idx, Lang::I16x8LtU), + Operator::I16x8GtS => self.binop(idx, Lang::I16x8GtS), + Operator::I16x8GtU => self.binop(idx, Lang::I16x8GtU), + Operator::I16x8LeS => self.binop(idx, Lang::I16x8LeS), + Operator::I16x8LeU => self.binop(idx, Lang::I16x8LeU), + Operator::I16x8GeS => self.binop(idx, Lang::I16x8GeS), + Operator::I16x8GeU => self.binop(idx, Lang::I16x8GeU), + Operator::I32x4Eq => self.binop(idx, Lang::I32x4Eq), + Operator::I32x4Ne => self.binop(idx, Lang::I32x4Ne), + Operator::I32x4LtS => self.binop(idx, Lang::I32x4LtS), + Operator::I32x4LtU => self.binop(idx, Lang::I32x4LtU), + Operator::I32x4GtS => self.binop(idx, Lang::I32x4GtS), + Operator::I32x4GtU => self.binop(idx, Lang::I32x4GtU), + Operator::I32x4LeS => self.binop(idx, Lang::I32x4LeS), + Operator::I32x4LeU => self.binop(idx, Lang::I32x4LeU), + Operator::I32x4GeS => self.binop(idx, Lang::I32x4GeS), + Operator::I32x4GeU => self.binop(idx, Lang::I32x4GeU), + Operator::I64x2Eq => self.binop(idx, Lang::I64x2Eq), + Operator::I64x2Ne => self.binop(idx, Lang::I64x2Ne), + Operator::I64x2LtS => self.binop(idx, Lang::I64x2LtS), + Operator::I64x2GtS => self.binop(idx, Lang::I64x2GtS), + Operator::I64x2LeS => self.binop(idx, Lang::I64x2LeS), + Operator::I64x2GeS => self.binop(idx, Lang::I64x2GeS), + Operator::F32x4Eq => self.binop(idx, Lang::F32x4Eq), + Operator::F32x4Ne => self.binop(idx, Lang::F32x4Ne), + Operator::F32x4Lt => self.binop(idx, Lang::F32x4Lt), + Operator::F32x4Gt => self.binop(idx, Lang::F32x4Gt), + Operator::F32x4Le => self.binop(idx, Lang::F32x4Le), + Operator::F32x4Ge => self.binop(idx, Lang::F32x4Ge), + Operator::F64x2Eq => self.binop(idx, Lang::F64x2Eq), + Operator::F64x2Ne => self.binop(idx, Lang::F64x2Ne), + Operator::F64x2Lt => self.binop(idx, Lang::F64x2Lt), + Operator::F64x2Gt => self.binop(idx, Lang::F64x2Gt), + 
Operator::F64x2Le => self.binop(idx, Lang::F64x2Le), + Operator::F64x2Ge => self.binop(idx, Lang::F64x2Ge), + + Operator::I8x16Abs => self.unop(idx, Lang::I8x16Abs), + Operator::I8x16Neg => self.unop(idx, Lang::I8x16Neg), + Operator::I8x16Popcnt => self.unop(idx, Lang::I8x16Popcnt), + Operator::I8x16AllTrue => self.unop(idx, Lang::I8x16AllTrue), + Operator::I8x16Bitmask => self.unop(idx, Lang::I8x16Bitmask), + Operator::I8x16NarrowI16x8S => self.binop(idx, Lang::I8x16NarrowI16x8S), + Operator::I8x16NarrowI16x8U => self.binop(idx, Lang::I8x16NarrowI16x8U), + Operator::I8x16Shl => self.binop(idx, Lang::I8x16Shl), + Operator::I8x16ShrS => self.binop(idx, Lang::I8x16ShrS), + Operator::I8x16ShrU => self.binop(idx, Lang::I8x16ShrU), + Operator::I8x16Add => self.binop(idx, Lang::I8x16Add), + Operator::I8x16AddSatS => self.binop(idx, Lang::I8x16AddSatS), + Operator::I8x16AddSatU => self.binop(idx, Lang::I8x16AddSatU), + Operator::I8x16Sub => self.binop(idx, Lang::I8x16Sub), + Operator::I8x16SubSatS => self.binop(idx, Lang::I8x16SubSatS), + Operator::I8x16SubSatU => self.binop(idx, Lang::I8x16SubSatU), + Operator::I8x16MinS => self.binop(idx, Lang::I8x16MinS), + Operator::I8x16MinU => self.binop(idx, Lang::I8x16MinU), + Operator::I8x16MaxS => self.binop(idx, Lang::I8x16MaxS), + Operator::I8x16MaxU => self.binop(idx, Lang::I8x16MaxU), + Operator::I8x16AvgrU => self.binop(idx, Lang::I8x16AvgrU), + + Operator::I16x8ExtAddPairwiseI8x16S => { self.unop(idx, Lang::I16x8ExtAddPairwiseI8x16S) } - Op::Simd(SimdOp::I16x8ExtAddPairwiseI8x16U) => { + Operator::I16x8ExtAddPairwiseI8x16U => { self.unop(idx, Lang::I16x8ExtAddPairwiseI8x16U) } - Op::Simd(SimdOp::I16x8Abs) => self.unop(idx, Lang::I16x8Abs), - Op::Simd(SimdOp::I16x8Neg) => self.unop(idx, Lang::I16x8Neg), - Op::Simd(SimdOp::I16x8Q15MulrSatS) => self.binop(idx, Lang::I16x8Q15MulrSatS), - Op::Simd(SimdOp::I16x8AllTrue) => self.unop(idx, Lang::I16x8AllTrue), - Op::Simd(SimdOp::I16x8Bitmask) => self.unop(idx, Lang::I16x8Bitmask), 
- Op::Simd(SimdOp::I16x8NarrowI32x4S) => self.binop(idx, Lang::I16x8NarrowI32x4S), - Op::Simd(SimdOp::I16x8NarrowI32x4U) => self.binop(idx, Lang::I16x8NarrowI32x4U), - Op::Simd(SimdOp::I16x8ExtendLowI8x16S) => { - self.unop(idx, Lang::I16x8ExtendLowI8x16S) - } - Op::Simd(SimdOp::I16x8ExtendHighI8x16S) => { - self.unop(idx, Lang::I16x8ExtendHighI8x16S) - } - Op::Simd(SimdOp::I16x8ExtendLowI8x16U) => { - self.unop(idx, Lang::I16x8ExtendLowI8x16U) - } - Op::Simd(SimdOp::I16x8ExtendHighI8x16U) => { - self.unop(idx, Lang::I16x8ExtendHighI8x16U) - } - Op::Simd(SimdOp::I16x8Shl) => self.binop(idx, Lang::I16x8Shl), - Op::Simd(SimdOp::I16x8ShrS) => self.binop(idx, Lang::I16x8ShrS), - Op::Simd(SimdOp::I16x8ShrU) => self.binop(idx, Lang::I16x8ShrU), - Op::Simd(SimdOp::I16x8Add) => self.binop(idx, Lang::I16x8Add), - Op::Simd(SimdOp::I16x8AddSatS) => self.binop(idx, Lang::I16x8AddSatS), - Op::Simd(SimdOp::I16x8AddSatU) => self.binop(idx, Lang::I16x8AddSatU), - Op::Simd(SimdOp::I16x8Sub) => self.binop(idx, Lang::I16x8Sub), - Op::Simd(SimdOp::I16x8SubSatS) => self.binop(idx, Lang::I16x8SubSatS), - Op::Simd(SimdOp::I16x8SubSatU) => self.binop(idx, Lang::I16x8SubSatU), - Op::Simd(SimdOp::I16x8Mul) => self.binop(idx, Lang::I16x8Mul), - Op::Simd(SimdOp::I16x8MinS) => self.binop(idx, Lang::I16x8MinS), - Op::Simd(SimdOp::I16x8MinU) => self.binop(idx, Lang::I16x8MinU), - Op::Simd(SimdOp::I16x8MaxS) => self.binop(idx, Lang::I16x8MaxS), - Op::Simd(SimdOp::I16x8MaxU) => self.binop(idx, Lang::I16x8MaxU), - Op::Simd(SimdOp::I16x8AvgrU) => self.binop(idx, Lang::I16x8AvgrU), - Op::Simd(SimdOp::I16x8ExtMulLowI8x16S) => { - self.binop(idx, Lang::I16x8ExtMulLowI8x16S) - } - Op::Simd(SimdOp::I16x8ExtMulHighI8x16S) => { - self.binop(idx, Lang::I16x8ExtMulHighI8x16S) - } - Op::Simd(SimdOp::I16x8ExtMulLowI8x16U) => { - self.binop(idx, Lang::I16x8ExtMulLowI8x16U) - } - Op::Simd(SimdOp::I16x8ExtMulHighI8x16U) => { - self.binop(idx, Lang::I16x8ExtMulHighI8x16U) - } - - 
Op::Simd(SimdOp::I32x4ExtAddPairwiseI16x8S) => { + Operator::I16x8Abs => self.unop(idx, Lang::I16x8Abs), + Operator::I16x8Neg => self.unop(idx, Lang::I16x8Neg), + Operator::I16x8Q15MulrSatS => self.binop(idx, Lang::I16x8Q15MulrSatS), + Operator::I16x8AllTrue => self.unop(idx, Lang::I16x8AllTrue), + Operator::I16x8Bitmask => self.unop(idx, Lang::I16x8Bitmask), + Operator::I16x8NarrowI32x4S => self.binop(idx, Lang::I16x8NarrowI32x4S), + Operator::I16x8NarrowI32x4U => self.binop(idx, Lang::I16x8NarrowI32x4U), + Operator::I16x8ExtendLowI8x16S => self.unop(idx, Lang::I16x8ExtendLowI8x16S), + Operator::I16x8ExtendHighI8x16S => self.unop(idx, Lang::I16x8ExtendHighI8x16S), + Operator::I16x8ExtendLowI8x16U => self.unop(idx, Lang::I16x8ExtendLowI8x16U), + Operator::I16x8ExtendHighI8x16U => self.unop(idx, Lang::I16x8ExtendHighI8x16U), + Operator::I16x8Shl => self.binop(idx, Lang::I16x8Shl), + Operator::I16x8ShrS => self.binop(idx, Lang::I16x8ShrS), + Operator::I16x8ShrU => self.binop(idx, Lang::I16x8ShrU), + Operator::I16x8Add => self.binop(idx, Lang::I16x8Add), + Operator::I16x8AddSatS => self.binop(idx, Lang::I16x8AddSatS), + Operator::I16x8AddSatU => self.binop(idx, Lang::I16x8AddSatU), + Operator::I16x8Sub => self.binop(idx, Lang::I16x8Sub), + Operator::I16x8SubSatS => self.binop(idx, Lang::I16x8SubSatS), + Operator::I16x8SubSatU => self.binop(idx, Lang::I16x8SubSatU), + Operator::I16x8Mul => self.binop(idx, Lang::I16x8Mul), + Operator::I16x8MinS => self.binop(idx, Lang::I16x8MinS), + Operator::I16x8MinU => self.binop(idx, Lang::I16x8MinU), + Operator::I16x8MaxS => self.binop(idx, Lang::I16x8MaxS), + Operator::I16x8MaxU => self.binop(idx, Lang::I16x8MaxU), + Operator::I16x8AvgrU => self.binop(idx, Lang::I16x8AvgrU), + Operator::I16x8ExtMulLowI8x16S => self.binop(idx, Lang::I16x8ExtMulLowI8x16S), + Operator::I16x8ExtMulHighI8x16S => self.binop(idx, Lang::I16x8ExtMulHighI8x16S), + Operator::I16x8ExtMulLowI8x16U => self.binop(idx, Lang::I16x8ExtMulLowI8x16U), + 
Operator::I16x8ExtMulHighI8x16U => self.binop(idx, Lang::I16x8ExtMulHighI8x16U), + + Operator::I32x4ExtAddPairwiseI16x8S => { self.unop(idx, Lang::I32x4ExtAddPairwiseI16x8S) } - Op::Simd(SimdOp::I32x4ExtAddPairwiseI16x8U) => { + Operator::I32x4ExtAddPairwiseI16x8U => { self.unop(idx, Lang::I32x4ExtAddPairwiseI16x8U) } - Op::Simd(SimdOp::I32x4Abs) => self.unop(idx, Lang::I32x4Abs), - Op::Simd(SimdOp::I32x4Neg) => self.unop(idx, Lang::I32x4Neg), - Op::Simd(SimdOp::I32x4AllTrue) => self.unop(idx, Lang::I32x4AllTrue), - Op::Simd(SimdOp::I32x4Bitmask) => self.unop(idx, Lang::I32x4Bitmask), - Op::Simd(SimdOp::I32x4ExtendLowI16x8S) => { - self.unop(idx, Lang::I32x4ExtendLowI16x8S) - } - Op::Simd(SimdOp::I32x4ExtendHighI16x8S) => { - self.unop(idx, Lang::I32x4ExtendHighI16x8S) - } - Op::Simd(SimdOp::I32x4ExtendLowI16x8U) => { - self.unop(idx, Lang::I32x4ExtendLowI16x8U) - } - Op::Simd(SimdOp::I32x4ExtendHighI16x8U) => { - self.unop(idx, Lang::I32x4ExtendHighI16x8U) - } - Op::Simd(SimdOp::I32x4Shl) => self.binop(idx, Lang::I32x4Shl), - Op::Simd(SimdOp::I32x4ShrS) => self.binop(idx, Lang::I32x4ShrS), - Op::Simd(SimdOp::I32x4ShrU) => self.binop(idx, Lang::I32x4ShrU), - Op::Simd(SimdOp::I32x4Add) => self.binop(idx, Lang::I32x4Add), - Op::Simd(SimdOp::I32x4Sub) => self.binop(idx, Lang::I32x4Sub), - Op::Simd(SimdOp::I32x4Mul) => self.binop(idx, Lang::I32x4Mul), - Op::Simd(SimdOp::I32x4MinS) => self.binop(idx, Lang::I32x4MinS), - Op::Simd(SimdOp::I32x4MinU) => self.binop(idx, Lang::I32x4MinU), - Op::Simd(SimdOp::I32x4MaxS) => self.binop(idx, Lang::I32x4MaxS), - Op::Simd(SimdOp::I32x4MaxU) => self.binop(idx, Lang::I32x4MaxU), - Op::Simd(SimdOp::I32x4DotI16x8S) => self.binop(idx, Lang::I32x4DotI16x8S), - Op::Simd(SimdOp::I32x4ExtMulLowI16x8S) => { - self.binop(idx, Lang::I32x4ExtMulLowI16x8S) - } - Op::Simd(SimdOp::I32x4ExtMulHighI16x8S) => { - self.binop(idx, Lang::I32x4ExtMulHighI16x8S) - } - Op::Simd(SimdOp::I32x4ExtMulLowI16x8U) => { - self.binop(idx, 
Lang::I32x4ExtMulLowI16x8U) - } - Op::Simd(SimdOp::I32x4ExtMulHighI16x8U) => { - self.binop(idx, Lang::I32x4ExtMulHighI16x8U) - } - - Op::Simd(SimdOp::I64x2Abs) => self.unop(idx, Lang::I64x2Abs), - Op::Simd(SimdOp::I64x2Neg) => self.unop(idx, Lang::I64x2Neg), - Op::Simd(SimdOp::I64x2AllTrue) => self.unop(idx, Lang::I64x2AllTrue), - Op::Simd(SimdOp::I64x2Bitmask) => self.unop(idx, Lang::I64x2Bitmask), - Op::Simd(SimdOp::I64x2ExtendLowI32x4S) => { - self.unop(idx, Lang::I64x2ExtendLowI32x4S) - } - Op::Simd(SimdOp::I64x2ExtendHighI32x4S) => { - self.unop(idx, Lang::I64x2ExtendHighI32x4S) - } - Op::Simd(SimdOp::I64x2ExtendLowI32x4U) => { - self.unop(idx, Lang::I64x2ExtendLowI32x4U) - } - Op::Simd(SimdOp::I64x2ExtendHighI32x4U) => { - self.unop(idx, Lang::I64x2ExtendHighI32x4U) - } - Op::Simd(SimdOp::I64x2Shl) => self.binop(idx, Lang::I64x2Shl), - Op::Simd(SimdOp::I64x2ShrS) => self.binop(idx, Lang::I64x2ShrS), - Op::Simd(SimdOp::I64x2ShrU) => self.binop(idx, Lang::I64x2ShrU), - Op::Simd(SimdOp::I64x2Add) => self.binop(idx, Lang::I64x2Add), - Op::Simd(SimdOp::I64x2Sub) => self.binop(idx, Lang::I64x2Sub), - Op::Simd(SimdOp::I64x2Mul) => self.binop(idx, Lang::I64x2Mul), - Op::Simd(SimdOp::I64x2ExtMulLowI32x4S) => { - self.binop(idx, Lang::I64x2ExtMulLowI32x4S) - } - Op::Simd(SimdOp::I64x2ExtMulHighI32x4S) => { - self.binop(idx, Lang::I64x2ExtMulHighI32x4S) - } - Op::Simd(SimdOp::I64x2ExtMulLowI32x4U) => { - self.binop(idx, Lang::I64x2ExtMulLowI32x4U) - } - Op::Simd(SimdOp::I64x2ExtMulHighI32x4U) => { - self.binop(idx, Lang::I64x2ExtMulHighI32x4U) - } - - Op::Simd(SimdOp::F32x4Ceil) => self.unop(idx, Lang::F32x4Ceil), - Op::Simd(SimdOp::F32x4Floor) => self.unop(idx, Lang::F32x4Floor), - Op::Simd(SimdOp::F32x4Trunc) => self.unop(idx, Lang::F32x4Trunc), - Op::Simd(SimdOp::F32x4Nearest) => self.unop(idx, Lang::F32x4Nearest), - Op::Simd(SimdOp::F32x4Abs) => self.unop(idx, Lang::F32x4Abs), - Op::Simd(SimdOp::F32x4Neg) => self.unop(idx, Lang::F32x4Neg), - 
Op::Simd(SimdOp::F32x4Sqrt) => self.unop(idx, Lang::F32x4Sqrt), - Op::Simd(SimdOp::F32x4Add) => self.binop(idx, Lang::F32x4Add), - Op::Simd(SimdOp::F32x4Sub) => self.binop(idx, Lang::F32x4Sub), - Op::Simd(SimdOp::F32x4Mul) => self.binop(idx, Lang::F32x4Mul), - Op::Simd(SimdOp::F32x4Div) => self.binop(idx, Lang::F32x4Div), - Op::Simd(SimdOp::F32x4Min) => self.binop(idx, Lang::F32x4Min), - Op::Simd(SimdOp::F32x4Max) => self.binop(idx, Lang::F32x4Max), - Op::Simd(SimdOp::F32x4PMin) => self.binop(idx, Lang::F32x4PMin), - Op::Simd(SimdOp::F32x4PMax) => self.binop(idx, Lang::F32x4PMax), - Op::Simd(SimdOp::F64x2Ceil) => self.unop(idx, Lang::F64x2Ceil), - Op::Simd(SimdOp::F64x2Floor) => self.unop(idx, Lang::F64x2Floor), - Op::Simd(SimdOp::F64x2Trunc) => self.unop(idx, Lang::F64x2Trunc), - Op::Simd(SimdOp::F64x2Nearest) => self.unop(idx, Lang::F64x2Nearest), - Op::Simd(SimdOp::F64x2Abs) => self.unop(idx, Lang::F64x2Abs), - Op::Simd(SimdOp::F64x2Neg) => self.unop(idx, Lang::F64x2Neg), - Op::Simd(SimdOp::F64x2Sqrt) => self.unop(idx, Lang::F64x2Sqrt), - Op::Simd(SimdOp::F64x2Add) => self.binop(idx, Lang::F64x2Add), - Op::Simd(SimdOp::F64x2Sub) => self.binop(idx, Lang::F64x2Sub), - Op::Simd(SimdOp::F64x2Mul) => self.binop(idx, Lang::F64x2Mul), - Op::Simd(SimdOp::F64x2Div) => self.binop(idx, Lang::F64x2Div), - Op::Simd(SimdOp::F64x2Min) => self.binop(idx, Lang::F64x2Min), - Op::Simd(SimdOp::F64x2Max) => self.binop(idx, Lang::F64x2Max), - Op::Simd(SimdOp::F64x2PMin) => self.binop(idx, Lang::F64x2PMin), - Op::Simd(SimdOp::F64x2PMax) => self.binop(idx, Lang::F64x2PMax), - - Op::Simd(SimdOp::I32x4TruncSatF32x4S) => self.unop(idx, Lang::I32x4TruncSatF32x4S), - Op::Simd(SimdOp::I32x4TruncSatF32x4U) => self.unop(idx, Lang::I32x4TruncSatF32x4U), - Op::Simd(SimdOp::F32x4ConvertI32x4S) => self.unop(idx, Lang::F32x4ConvertI32x4S), - Op::Simd(SimdOp::F32x4ConvertI32x4U) => self.unop(idx, Lang::F32x4ConvertI32x4U), - Op::Simd(SimdOp::I32x4TruncSatF64x2SZero) => { - self.unop(idx, 
Lang::I32x4TruncSatF64x2SZero) - } - Op::Simd(SimdOp::I32x4TruncSatF64x2UZero) => { - self.unop(idx, Lang::I32x4TruncSatF64x2UZero) - } - Op::Simd(SimdOp::F64x2ConvertLowI32x4S) => { - self.unop(idx, Lang::F64x2ConvertLowI32x4S) - } - Op::Simd(SimdOp::F64x2ConvertLowI32x4U) => { - self.unop(idx, Lang::F64x2ConvertLowI32x4U) - } - Op::Simd(SimdOp::F32x4DemoteF64x2Zero) => { - self.unop(idx, Lang::F32x4DemoteF64x2Zero) - } - Op::Simd(SimdOp::F64x2PromoteLowF32x4) => { - self.unop(idx, Lang::F64x2PromoteLowF32x4) - } + Operator::I32x4Abs => self.unop(idx, Lang::I32x4Abs), + Operator::I32x4Neg => self.unop(idx, Lang::I32x4Neg), + Operator::I32x4AllTrue => self.unop(idx, Lang::I32x4AllTrue), + Operator::I32x4Bitmask => self.unop(idx, Lang::I32x4Bitmask), + Operator::I32x4ExtendLowI16x8S => self.unop(idx, Lang::I32x4ExtendLowI16x8S), + Operator::I32x4ExtendHighI16x8S => self.unop(idx, Lang::I32x4ExtendHighI16x8S), + Operator::I32x4ExtendLowI16x8U => self.unop(idx, Lang::I32x4ExtendLowI16x8U), + Operator::I32x4ExtendHighI16x8U => self.unop(idx, Lang::I32x4ExtendHighI16x8U), + Operator::I32x4Shl => self.binop(idx, Lang::I32x4Shl), + Operator::I32x4ShrS => self.binop(idx, Lang::I32x4ShrS), + Operator::I32x4ShrU => self.binop(idx, Lang::I32x4ShrU), + Operator::I32x4Add => self.binop(idx, Lang::I32x4Add), + Operator::I32x4Sub => self.binop(idx, Lang::I32x4Sub), + Operator::I32x4Mul => self.binop(idx, Lang::I32x4Mul), + Operator::I32x4MinS => self.binop(idx, Lang::I32x4MinS), + Operator::I32x4MinU => self.binop(idx, Lang::I32x4MinU), + Operator::I32x4MaxS => self.binop(idx, Lang::I32x4MaxS), + Operator::I32x4MaxU => self.binop(idx, Lang::I32x4MaxU), + Operator::I32x4DotI16x8S => self.binop(idx, Lang::I32x4DotI16x8S), + Operator::I32x4ExtMulLowI16x8S => self.binop(idx, Lang::I32x4ExtMulLowI16x8S), + Operator::I32x4ExtMulHighI16x8S => self.binop(idx, Lang::I32x4ExtMulHighI16x8S), + Operator::I32x4ExtMulLowI16x8U => self.binop(idx, Lang::I32x4ExtMulLowI16x8U), + 
Operator::I32x4ExtMulHighI16x8U => self.binop(idx, Lang::I32x4ExtMulHighI16x8U), + + Operator::I64x2Abs => self.unop(idx, Lang::I64x2Abs), + Operator::I64x2Neg => self.unop(idx, Lang::I64x2Neg), + Operator::I64x2AllTrue => self.unop(idx, Lang::I64x2AllTrue), + Operator::I64x2Bitmask => self.unop(idx, Lang::I64x2Bitmask), + Operator::I64x2ExtendLowI32x4S => self.unop(idx, Lang::I64x2ExtendLowI32x4S), + Operator::I64x2ExtendHighI32x4S => self.unop(idx, Lang::I64x2ExtendHighI32x4S), + Operator::I64x2ExtendLowI32x4U => self.unop(idx, Lang::I64x2ExtendLowI32x4U), + Operator::I64x2ExtendHighI32x4U => self.unop(idx, Lang::I64x2ExtendHighI32x4U), + Operator::I64x2Shl => self.binop(idx, Lang::I64x2Shl), + Operator::I64x2ShrS => self.binop(idx, Lang::I64x2ShrS), + Operator::I64x2ShrU => self.binop(idx, Lang::I64x2ShrU), + Operator::I64x2Add => self.binop(idx, Lang::I64x2Add), + Operator::I64x2Sub => self.binop(idx, Lang::I64x2Sub), + Operator::I64x2Mul => self.binop(idx, Lang::I64x2Mul), + Operator::I64x2ExtMulLowI32x4S => self.binop(idx, Lang::I64x2ExtMulLowI32x4S), + Operator::I64x2ExtMulHighI32x4S => self.binop(idx, Lang::I64x2ExtMulHighI32x4S), + Operator::I64x2ExtMulLowI32x4U => self.binop(idx, Lang::I64x2ExtMulLowI32x4U), + Operator::I64x2ExtMulHighI32x4U => self.binop(idx, Lang::I64x2ExtMulHighI32x4U), + + Operator::F32x4Ceil => self.unop(idx, Lang::F32x4Ceil), + Operator::F32x4Floor => self.unop(idx, Lang::F32x4Floor), + Operator::F32x4Trunc => self.unop(idx, Lang::F32x4Trunc), + Operator::F32x4Nearest => self.unop(idx, Lang::F32x4Nearest), + Operator::F32x4Abs => self.unop(idx, Lang::F32x4Abs), + Operator::F32x4Neg => self.unop(idx, Lang::F32x4Neg), + Operator::F32x4Sqrt => self.unop(idx, Lang::F32x4Sqrt), + Operator::F32x4Add => self.binop(idx, Lang::F32x4Add), + Operator::F32x4Sub => self.binop(idx, Lang::F32x4Sub), + Operator::F32x4Mul => self.binop(idx, Lang::F32x4Mul), + Operator::F32x4Div => self.binop(idx, Lang::F32x4Div), + Operator::F32x4Min => 
self.binop(idx, Lang::F32x4Min), + Operator::F32x4Max => self.binop(idx, Lang::F32x4Max), + Operator::F32x4PMin => self.binop(idx, Lang::F32x4PMin), + Operator::F32x4PMax => self.binop(idx, Lang::F32x4PMax), + Operator::F64x2Ceil => self.unop(idx, Lang::F64x2Ceil), + Operator::F64x2Floor => self.unop(idx, Lang::F64x2Floor), + Operator::F64x2Trunc => self.unop(idx, Lang::F64x2Trunc), + Operator::F64x2Nearest => self.unop(idx, Lang::F64x2Nearest), + Operator::F64x2Abs => self.unop(idx, Lang::F64x2Abs), + Operator::F64x2Neg => self.unop(idx, Lang::F64x2Neg), + Operator::F64x2Sqrt => self.unop(idx, Lang::F64x2Sqrt), + Operator::F64x2Add => self.binop(idx, Lang::F64x2Add), + Operator::F64x2Sub => self.binop(idx, Lang::F64x2Sub), + Operator::F64x2Mul => self.binop(idx, Lang::F64x2Mul), + Operator::F64x2Div => self.binop(idx, Lang::F64x2Div), + Operator::F64x2Min => self.binop(idx, Lang::F64x2Min), + Operator::F64x2Max => self.binop(idx, Lang::F64x2Max), + Operator::F64x2PMin => self.binop(idx, Lang::F64x2PMin), + Operator::F64x2PMax => self.binop(idx, Lang::F64x2PMax), + + Operator::I32x4TruncSatF32x4S => self.unop(idx, Lang::I32x4TruncSatF32x4S), + Operator::I32x4TruncSatF32x4U => self.unop(idx, Lang::I32x4TruncSatF32x4U), + Operator::F32x4ConvertI32x4S => self.unop(idx, Lang::F32x4ConvertI32x4S), + Operator::F32x4ConvertI32x4U => self.unop(idx, Lang::F32x4ConvertI32x4U), + Operator::I32x4TruncSatF64x2SZero => self.unop(idx, Lang::I32x4TruncSatF64x2SZero), + Operator::I32x4TruncSatF64x2UZero => self.unop(idx, Lang::I32x4TruncSatF64x2UZero), + Operator::F64x2ConvertLowI32x4S => self.unop(idx, Lang::F64x2ConvertLowI32x4S), + Operator::F64x2ConvertLowI32x4U => self.unop(idx, Lang::F64x2ConvertLowI32x4U), + Operator::F32x4DemoteF64x2Zero => self.unop(idx, Lang::F32x4DemoteF64x2Zero), + Operator::F64x2PromoteLowF32x4 => self.unop(idx, Lang::F64x2PromoteLowF32x4), op => { // If the operator is not implemented, warn and bail out. 
We From 386ce2c5e05333f86ba139f3e91c73e5d429a04d Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 26 Nov 2024 21:54:56 +0100 Subject: [PATCH 74/83] wasm-dump: fix wasmparser usage --- src/bin/wasm-tools/dump.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/bin/wasm-tools/dump.rs b/src/bin/wasm-tools/dump.rs index 8a8ef69219..5379956851 100644 --- a/src/bin/wasm-tools/dump.rs +++ b/src/bin/wasm-tools/dump.rs @@ -831,5 +831,13 @@ macro_rules! define_visit_operator { impl<'a> VisitOperator<'a> for Dump<'_> { type Output = (); - wasmparser::for_each_operator!(define_visit_operator); + fn simd_visitor(&mut self) -> Option<&mut dyn VisitSimdOperator<'a, Output = Self::Output>> { + Some(self) + } + + wasmparser::for_each_visit_operator!(define_visit_operator); +} + +impl<'a> VisitSimdOperator<'a> for Dump<'_> { + wasmparser::for_each_visit_simd_operator!(define_visit_operator); } From 809e9c5e6dab4d104ea859717121c33015b5dd3e Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 26 Nov 2024 22:00:41 +0100 Subject: [PATCH 75/83] wasmparser: fix benchmarks --- crates/wasmparser/benches/benchmark.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/wasmparser/benches/benchmark.rs b/crates/wasmparser/benches/benchmark.rs index 16500b95b2..210de00058 100644 --- a/crates/wasmparser/benches/benchmark.rs +++ b/crates/wasmparser/benches/benchmark.rs @@ -369,10 +369,10 @@ impl<'a> VisitOperator<'a> for NopVisit { Some(self) } - wasmparser::for_each_operator!(define_visit_operator); + wasmparser::for_each_visit_operator!(define_visit_operator); } #[allow(unused_variables)] impl<'a> VisitSimdOperator<'a> for NopVisit { - wasmparser::for_each_simd_operator!(define_visit_operator); + wasmparser::for_each_visit_simd_operator!(define_visit_operator); } From 4e9ec4ac1a2fef543d6a6f94c51869425c557bf8 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 26 Nov 2024 22:04:03 +0100 Subject: [PATCH 76/83] fix doc tests --- 
crates/wasmparser/src/binary_reader.rs | 4 ++-- crates/wasmparser/src/lib.rs | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/crates/wasmparser/src/binary_reader.rs b/crates/wasmparser/src/binary_reader.rs index c5bcd5a058..d1eb395403 100644 --- a/crates/wasmparser/src/binary_reader.rs +++ b/crates/wasmparser/src/binary_reader.rs @@ -849,7 +849,7 @@ impl<'a> BinaryReader<'a> { /// Store an offset for use in diagnostics or any other purposes: /// /// ``` - /// # use wasmparser::{BinaryReader, VisitOperator, Result, for_each_operator}; + /// # use wasmparser::{BinaryReader, VisitOperator, Result, for_each_visit_operator}; /// /// pub fn dump(mut reader: BinaryReader) -> Result<()> { /// let mut visitor = Dumper { offset: 0 }; @@ -876,7 +876,7 @@ impl<'a> BinaryReader<'a> { /// /// impl<'a> VisitOperator<'a> for Dumper { /// type Output = (); - /// for_each_operator!(define_visit_operator); + /// for_each_visit_operator!(define_visit_operator); /// } /// /// ``` diff --git a/crates/wasmparser/src/lib.rs b/crates/wasmparser/src/lib.rs index 8fca9cb9af..0ac582eff5 100644 --- a/crates/wasmparser/src/lib.rs +++ b/crates/wasmparser/src/lib.rs @@ -1010,7 +1010,7 @@ pub use _for_each_operator as _for_each_operator_delegate; /// impl<'a> wasmparser::VisitOperator<'a> for VisitAndDoNothing { /// type Output = (); /// -/// wasmparser::for_each_operator!(define_visit_operator); +/// wasmparser::for_each_visit_operator!(define_visit_operator); /// } /// ``` /// @@ -1060,8 +1060,8 @@ pub use _for_each_operator as _for_each_operator_delegate; /// impl<'a> wasmparser::VisitOperator<'a> for VisitOnlyMvp { /// type Output = bool; /// -/// wasmparser::for_each_operator!(visit_only_mvp); -/// # wasmparser::for_each_operator!(visit_mvp); +/// wasmparser::for_each_visit_operator!(visit_only_mvp); +/// # wasmparser::for_each_visit_operator!(visit_mvp); /// /// // manually define `visit_*` for all MVP operators here /// } @@ -1166,7 +1166,7 @@ pub use 
_for_each_operator_delegate as for_each_operator; /// impl<'a> wasmparser::VisitOperator<'a> for VisitAndDoNothing { /// type Output = (); /// -/// wasmparser::for_each_operator!(define_visit_operator); +/// wasmparser::for_each_visit_operator!(define_visit_operator); /// } /// ``` /// @@ -1216,8 +1216,8 @@ pub use _for_each_operator_delegate as for_each_operator; /// impl<'a> wasmparser::VisitOperator<'a> for VisitOnlyMvp { /// type Output = bool; /// -/// wasmparser::for_each_operator!(visit_only_mvp); -/// # wasmparser::for_each_operator!(visit_mvp); +/// wasmparser::for_each_visit_operator!(visit_only_mvp); +/// # wasmparser::for_each_visit_operator!(visit_mvp); /// /// // manually define `visit_*` for all MVP operators here /// } From 05a94e3bff4e26997b02cf957c77129266262dc7 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 26 Nov 2024 22:11:32 +0100 Subject: [PATCH 77/83] change doc example for for_each_operator macro --- crates/wasmparser/src/lib.rs | 64 +++++++++++++++++------------------- 1 file changed, 31 insertions(+), 33 deletions(-) diff --git a/crates/wasmparser/src/lib.rs b/crates/wasmparser/src/lib.rs index 0ac582eff5..76e9497abe 100644 --- a/crates/wasmparser/src/lib.rs +++ b/crates/wasmparser/src/lib.rs @@ -977,41 +977,39 @@ pub use _for_each_operator as _for_each_operator_delegate; /// https://github.com/WebAssembly/wide-arithmetic /// /// ``` -/// macro_rules! define_visit_operator { -/// // The outer layer of repetition represents how all operators are -/// // provided to the macro at the same time. -/// // -/// // The `$proposal` identifier indicates the Wasm proposals from which -/// // the Wasm operator is originating. -/// // For example to specialize the macro match arm for Wasm SIMD proposal -/// // operators you could write `@simd` instead of `@$proposal:ident` to -/// // only catch those operators. -/// // -/// // The `$op` name is bound to the `Operator` variant name. 
The -/// // payload of the operator is optionally specified (the `$(...)?` -/// // clause) since not all instructions have payloads. Within the payload -/// // each argument is named and has its type specified. -/// // -/// // The `$visit` name is bound to the corresponding name in the -/// // `VisitOperator` trait that this corresponds to. -/// // -/// // The `$ann` annotations give information about the operator's type (e.g. binary i32 or arity 2 -> 1). -/// ($( @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { -/// $( -/// fn $visit(&mut self $($(,$arg: $argty)*)?) { -/// // do nothing for this example +/// fn do_nothing(op: &wasmparser::Operator) { +/// macro_rules! define_impl_operator { +/// // The outer layer of repetition represents how all operators are +/// // provided to the macro at the same time. +/// // +/// // The `$proposal` identifier indicates the Wasm proposals from which +/// // the Wasm operator is originating. +/// // For example to specialize the macro match arm for Wasm SIMD proposal +/// // operators you could write `@simd` instead of `@$proposal:ident` to +/// // only catch those operators. +/// // +/// // The `$op` name is bound to the `Operator` variant name. The +/// // payload of the operator is optionally specified (the `$(...)?` +/// // clause) since not all instructions have payloads. Within the payload +/// // each argument is named and has its type specified. +/// // +/// // The `$visit` name is bound to the corresponding name in the +/// // `VisitOperator` trait that this corresponds to. +/// // +/// // The `$ann` annotations give information about the operator's type (e.g. binary i32 or arity 2 -> 1). +/// ($( @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { +/// match op { +/// $( +/// wasmparser::Operator::$op $( { $($arg),* } )? 
=> { +/// // do nothing for this example +/// } +/// )* +/// _ => unreachable!(), // required because `Operator` enum is non-exhaustive /// } -/// )* +/// } /// } -/// } -/// -/// pub struct VisitAndDoNothing; -/// -/// impl<'a> wasmparser::VisitOperator<'a> for VisitAndDoNothing { -/// type Output = (); -/// -/// wasmparser::for_each_visit_operator!(define_visit_operator); -/// } +/// wasmparser::for_each_operator!(define_impl_operator); +/// } /// ``` /// /// If you only wanted to visit the initial base set of wasm instructions, for From ba7d720c8067637c4380f1777f7219ae41ad1874 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 26 Nov 2024 22:13:08 +0100 Subject: [PATCH 78/83] rename doc test macro --- crates/wasmparser/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/wasmparser/src/lib.rs b/crates/wasmparser/src/lib.rs index 76e9497abe..fe5650499b 100644 --- a/crates/wasmparser/src/lib.rs +++ b/crates/wasmparser/src/lib.rs @@ -978,7 +978,7 @@ pub use _for_each_operator as _for_each_operator_delegate; /// /// ``` /// fn do_nothing(op: &wasmparser::Operator) { -/// macro_rules! define_impl_operator { +/// macro_rules! define_match_operator { /// // The outer layer of repetition represents how all operators are /// // provided to the macro at the same time. 
/// // @@ -1008,7 +1008,7 @@ pub use _for_each_operator as _for_each_operator_delegate; /// } /// } /// } -/// wasmparser::for_each_operator!(define_impl_operator); +/// wasmparser::for_each_operator!(define_match_operator); /// } /// ``` /// From 87aba0c6f32ace37c0e941301561949fbdc01a53 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 26 Nov 2024 22:14:31 +0100 Subject: [PATCH 79/83] fix typo --- crates/wasmparser/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/wasmparser/src/lib.rs b/crates/wasmparser/src/lib.rs index fe5650499b..021965ae84 100644 --- a/crates/wasmparser/src/lib.rs +++ b/crates/wasmparser/src/lib.rs @@ -1038,7 +1038,7 @@ pub use _for_each_operator as _for_each_operator_delegate; /// } /// } /// # // to get this example to compile another macro is used here to define -/// # // visit methods for all mvp oeprators. +/// # // visit methods for all mvp operators. /// # macro_rules! visit_mvp { /// # ($( @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { /// # $( From c2823bc0e86fcc3ba9f4576da2b97b9aff860e17 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 26 Nov 2024 22:15:47 +0100 Subject: [PATCH 80/83] apply rustfmt --- crates/wasmparser/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/wasmparser/src/lib.rs b/crates/wasmparser/src/lib.rs index 021965ae84..d596f1ee38 100644 --- a/crates/wasmparser/src/lib.rs +++ b/crates/wasmparser/src/lib.rs @@ -1009,7 +1009,7 @@ pub use _for_each_operator as _for_each_operator_delegate; /// } /// } /// wasmparser::for_each_operator!(define_match_operator); -/// } +/// } /// ``` /// /// If you only wanted to visit the initial base set of wasm instructions, for From c07723b2b67291c112c6a47b4a1798d7a12c53b3 Mon Sep 17 00:00:00 2001 From: Robin Freyler Date: Tue, 26 Nov 2024 22:37:37 +0100 Subject: [PATCH 81/83] craft new doc example for for_each_operator! 
macro --- crates/wasmparser/src/lib.rs | 61 ++++++++++-------------------------- 1 file changed, 17 insertions(+), 44 deletions(-) diff --git a/crates/wasmparser/src/lib.rs b/crates/wasmparser/src/lib.rs index d596f1ee38..aedef0cd1e 100644 --- a/crates/wasmparser/src/lib.rs +++ b/crates/wasmparser/src/lib.rs @@ -1016,52 +1016,25 @@ pub use _for_each_operator as _for_each_operator_delegate; /// example, you could do: /// /// ``` -/// macro_rules! visit_only_mvp { -/// // delegate the macro invocation to sub-invocations of this macro to -/// // deal with each instruction on a case-by-case basis. -/// ($( @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { -/// $( -/// visit_only_mvp!(visit_one @$proposal $op $({ $($arg: $argty),* })? => $visit); -/// )* -/// }; -/// -/// // MVP instructions are defined manually, so do nothing. -/// (visit_one @mvp $($rest:tt)*) => {}; +/// fn is_mvp_operator(op: &wasmparser::Operator) -> bool { +/// macro_rules! define_match_operator { +/// // delegate the macro invocation to sub-invocations of this macro to +/// // deal with each instruction on a case-by-case basis. +/// ($( @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { +/// match op { +/// $( +/// wasmparser::Operator::$op $( { $($arg),* } )? => { +/// define_match_operator!(impl_one @$proposal) +/// } +/// )* +/// _ => unreachable!(), // required because `Operator` enum is non-exhaustive +/// } +/// }; /// -/// // Non-MVP instructions all return `false` here. The exact type depends -/// // on `type Output` in the trait implementation below. You could change -/// // it to `Result<()>` for example and return an error here too. -/// (visit_one @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident) => { -/// fn $visit(&mut self $($(,$arg: $argty)*)?) 
-> bool { -/// false -/// } +/// (impl_one @mvp) => { true }; +/// (impl_one @$proposal:ident) => { false }; /// } -/// } -/// # // to get this example to compile another macro is used here to define -/// # // visit methods for all mvp operators. -/// # macro_rules! visit_mvp { -/// # ($( @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { -/// # $( -/// # visit_mvp!(visit_one @$proposal $op $({ $($arg: $argty),* })? => $visit); -/// # )* -/// # }; -/// # (visit_one @mvp $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident) => { -/// # fn $visit(&mut self $($(,$arg: $argty)*)?) -> bool { -/// # true -/// # } -/// # }; -/// # (visit_one @$proposal:ident $($rest:tt)*) => {}; -/// # } -/// -/// pub struct VisitOnlyMvp; -/// -/// impl<'a> wasmparser::VisitOperator<'a> for VisitOnlyMvp { -/// type Output = bool; -/// -/// wasmparser::for_each_visit_operator!(visit_only_mvp); -/// # wasmparser::for_each_visit_operator!(visit_mvp); -/// -/// // manually define `visit_*` for all MVP operators here +/// wasmparser::for_each_operator!(define_match_operator) /// } /// ``` #[doc(inline)] From 36f024ae017f7acd8e566a2ce431ca1631fc9f7a Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Tue, 26 Nov 2024 14:13:10 -0800 Subject: [PATCH 82/83] Minor changes * Revert `crates/wasm-encoder/src/reencode.rs` back to `main` version (no changes needed any more) * Minor whitespace/style changes * Document the macros some more in `wasmparser/src/lib.rs` * Reorganize macro-defining-macros to have more predictable names. 
--- crates/wasm-encoder/src/reencode.rs | 149 ++++++++++++++-------------- crates/wasm-mutate/Cargo.toml | 2 +- crates/wasmparser/src/arity.rs | 4 +- crates/wasmparser/src/lib.rs | 143 ++++++++++++++------------ 4 files changed, 154 insertions(+), 144 deletions(-) diff --git a/crates/wasm-encoder/src/reencode.rs b/crates/wasm-encoder/src/reencode.rs index 7de2f05a06..b85c406166 100644 --- a/crates/wasm-encoder/src/reencode.rs +++ b/crates/wasm-encoder/src/reencode.rs @@ -601,7 +601,6 @@ impl Reencode for RoundtripReencoder { #[allow(missing_docs)] // FIXME pub mod utils { use super::{Error, Reencode}; - use crate::Instruction; use crate::{CoreTypeEncoder, Encode}; use std::ops::Range; @@ -1547,98 +1546,94 @@ pub mod utils { } } - #[rustfmt::skip] - macro_rules! translate_map { - // This case is used to map, based on the name of the field, from the - // wasmparser payload type to the wasm-encoder payload type through - // `Translator` as applicable. - ($reencoder:ident $arg:ident tag_index) => ($reencoder.tag_index($arg)); - ($reencoder:ident $arg:ident function_index) => ($reencoder.function_index($arg)); - ($reencoder:ident $arg:ident table) => ($reencoder.table_index($arg)); - ($reencoder:ident $arg:ident table_index) => ($reencoder.table_index($arg)); - ($reencoder:ident $arg:ident dst_table) => ($reencoder.table_index($arg)); - ($reencoder:ident $arg:ident src_table) => ($reencoder.table_index($arg)); - ($reencoder:ident $arg:ident type_index) => ($reencoder.type_index($arg)); - ($reencoder:ident $arg:ident array_type_index) => ($reencoder.type_index($arg)); - ($reencoder:ident $arg:ident array_type_index_dst) => ($reencoder.type_index($arg)); - ($reencoder:ident $arg:ident array_type_index_src) => ($reencoder.type_index($arg)); - ($reencoder:ident $arg:ident struct_type_index) => ($reencoder.type_index($arg)); - ($reencoder:ident $arg:ident global_index) => ($reencoder.global_index($arg)); - ($reencoder:ident $arg:ident mem) => ($reencoder.memory_index($arg)); 
- ($reencoder:ident $arg:ident src_mem) => ($reencoder.memory_index($arg)); - ($reencoder:ident $arg:ident dst_mem) => ($reencoder.memory_index($arg)); - ($reencoder:ident $arg:ident data_index) => ($reencoder.data_index($arg)); - ($reencoder:ident $arg:ident elem_index) => ($reencoder.element_index($arg)); - ($reencoder:ident $arg:ident array_data_index) => ($reencoder.data_index($arg)); - ($reencoder:ident $arg:ident array_elem_index) => ($reencoder.element_index($arg)); - ($reencoder:ident $arg:ident blockty) => ($reencoder.block_type($arg)?); - ($reencoder:ident $arg:ident relative_depth) => ($arg); - ($reencoder:ident $arg:ident targets) => (( - $arg - .targets() - .collect::, wasmparser::BinaryReaderError>>()? - .into(), - $arg.default(), - )); - ($reencoder:ident $arg:ident ty) => ($reencoder.val_type($arg)?); - ($reencoder:ident $arg:ident hty) => ($reencoder.heap_type($arg)?); - ($reencoder:ident $arg:ident from_ref_type) => ($reencoder.ref_type($arg)?); - ($reencoder:ident $arg:ident to_ref_type) => ($reencoder.ref_type($arg)?); - ($reencoder:ident $arg:ident memarg) => ($reencoder.mem_arg($arg)); - ($reencoder:ident $arg:ident ordering) => ($reencoder.ordering($arg)); - ($reencoder:ident $arg:ident local_index) => ($arg); - ($reencoder:ident $arg:ident value) => ($arg); - ($reencoder:ident $arg:ident lane) => ($arg); - ($reencoder:ident $arg:ident lanes) => ($arg); - ($reencoder:ident $arg:ident array_size) => ($arg); - ($reencoder:ident $arg:ident field_index) => ($arg); - ($reencoder:ident $arg:ident try_table) => ($arg); - ($reencoder:ident $arg:ident argument_index) => ($reencoder.type_index($arg)); - ($reencoder:ident $arg:ident result_index) => ($reencoder.type_index($arg)); - ($reencoder:ident $arg:ident cont_type_index) => ($reencoder.type_index($arg)); - ($reencoder:ident $arg:ident resume_table) => (( - $arg.handlers.into_iter().map(|h| $reencoder.handle(h)).collect::>().into() - )); - } - - #[rustfmt::skip] - macro_rules! 
translate_build { - // This case takes the arguments of a wasmparser instruction and creates - // a wasm-encoder instruction. There are a few special cases for where - // the structure of a wasmparser instruction differs from that of - // wasm-encoder. - ($reencoder:ident $op:ident) => (Instruction::$op); - ($reencoder:ident BrTable $arg:ident) => (Instruction::BrTable($arg.0, $arg.1)); - ($reencoder:ident I32Const $arg:ident) => (Instruction::I32Const($arg)); - ($reencoder:ident I64Const $arg:ident) => (Instruction::I64Const($arg)); - ($reencoder:ident F32Const $arg:ident) => (Instruction::F32Const(f32::from_bits($arg.bits()))); - ($reencoder:ident F64Const $arg:ident) => (Instruction::F64Const(f64::from_bits($arg.bits()))); - ($reencoder:ident V128Const $arg:ident) => (Instruction::V128Const($arg.i128())); - ($reencoder:ident TryTable $table:ident) => (Instruction::TryTable($reencoder.block_type($table.ty)?, { - $table.catches.into_iter().map(|c| $reencoder.catch(c)).collect::>().into() - })); - ($reencoder:ident $op:ident $arg:ident) => (Instruction::$op($arg)); - ($reencoder:ident $op:ident $($arg:ident)*) => (Instruction::$op { $($arg),* }); - } - pub fn instruction<'a, T: ?Sized + Reencode>( reencoder: &mut T, arg: wasmparser::Operator<'a>, ) -> Result, Error> { + use crate::Instruction; + macro_rules! translate { ($( @$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => { Ok(match arg { $( wasmparser::Operator::$op $({ $($arg),* })? => { $( - $(let $arg = translate_map!(reencoder $arg $arg);)* + $(let $arg = translate!(map $arg $arg);)* )? - translate_build!(reencoder $op $($($arg)*)?) + translate!(build $op $($($arg)*)?) } )* unexpected => unreachable!("encountered unexpected Wasm operator: {unexpected:?}"), }) }; + + // This case is used to map, based on the name of the field, from the + // wasmparser payload type to the wasm-encoder payload type through + // `Translator` as applicable. 
+ (map $arg:ident tag_index) => (reencoder.tag_index($arg)); + (map $arg:ident function_index) => (reencoder.function_index($arg)); + (map $arg:ident table) => (reencoder.table_index($arg)); + (map $arg:ident table_index) => (reencoder.table_index($arg)); + (map $arg:ident dst_table) => (reencoder.table_index($arg)); + (map $arg:ident src_table) => (reencoder.table_index($arg)); + (map $arg:ident type_index) => (reencoder.type_index($arg)); + (map $arg:ident array_type_index) => (reencoder.type_index($arg)); + (map $arg:ident array_type_index_dst) => (reencoder.type_index($arg)); + (map $arg:ident array_type_index_src) => (reencoder.type_index($arg)); + (map $arg:ident struct_type_index) => (reencoder.type_index($arg)); + (map $arg:ident global_index) => (reencoder.global_index($arg)); + (map $arg:ident mem) => (reencoder.memory_index($arg)); + (map $arg:ident src_mem) => (reencoder.memory_index($arg)); + (map $arg:ident dst_mem) => (reencoder.memory_index($arg)); + (map $arg:ident data_index) => (reencoder.data_index($arg)); + (map $arg:ident elem_index) => (reencoder.element_index($arg)); + (map $arg:ident array_data_index) => (reencoder.data_index($arg)); + (map $arg:ident array_elem_index) => (reencoder.element_index($arg)); + (map $arg:ident blockty) => (reencoder.block_type($arg)?); + (map $arg:ident relative_depth) => ($arg); + (map $arg:ident targets) => (( + $arg + .targets() + .collect::, wasmparser::BinaryReaderError>>()? 
+ .into(), + $arg.default(), + )); + (map $arg:ident ty) => (reencoder.val_type($arg)?); + (map $arg:ident hty) => (reencoder.heap_type($arg)?); + (map $arg:ident from_ref_type) => (reencoder.ref_type($arg)?); + (map $arg:ident to_ref_type) => (reencoder.ref_type($arg)?); + (map $arg:ident memarg) => (reencoder.mem_arg($arg)); + (map $arg:ident ordering) => (reencoder.ordering($arg)); + (map $arg:ident local_index) => ($arg); + (map $arg:ident value) => ($arg); + (map $arg:ident lane) => ($arg); + (map $arg:ident lanes) => ($arg); + (map $arg:ident array_size) => ($arg); + (map $arg:ident field_index) => ($arg); + (map $arg:ident try_table) => ($arg); + (map $arg:ident argument_index) => (reencoder.type_index($arg)); + (map $arg:ident result_index) => (reencoder.type_index($arg)); + (map $arg:ident cont_type_index) => (reencoder.type_index($arg)); + (map $arg:ident resume_table) => (( + $arg.handlers.into_iter().map(|h| reencoder.handle(h)).collect::>().into() + )); + + // This case takes the arguments of a wasmparser instruction and creates + // a wasm-encoder instruction. There are a few special cases for where + // the structure of a wasmparser instruction differs from that of + // wasm-encoder. 
+ (build $op:ident) => (Instruction::$op); + (build BrTable $arg:ident) => (Instruction::BrTable($arg.0, $arg.1)); + (build I32Const $arg:ident) => (Instruction::I32Const($arg)); + (build I64Const $arg:ident) => (Instruction::I64Const($arg)); + (build F32Const $arg:ident) => (Instruction::F32Const(f32::from_bits($arg.bits()))); + (build F64Const $arg:ident) => (Instruction::F64Const(f64::from_bits($arg.bits()))); + (build V128Const $arg:ident) => (Instruction::V128Const($arg.i128())); + (build TryTable $table:ident) => (Instruction::TryTable(reencoder.block_type($table.ty)?, { + $table.catches.into_iter().map(|c| reencoder.catch(c)).collect::>().into() + })); + (build $op:ident $arg:ident) => (Instruction::$op($arg)); + (build $op:ident $($arg:ident)*) => (Instruction::$op { $($arg),* }); } wasmparser::for_each_operator!(translate) diff --git a/crates/wasm-mutate/Cargo.toml b/crates/wasm-mutate/Cargo.toml index 90854ef851..17f12a4a3f 100644 --- a/crates/wasm-mutate/Cargo.toml +++ b/crates/wasm-mutate/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] clap = { workspace = true, optional = true } thiserror = "1.0.28" -wasmparser = { workspace = true, features = ['simd']} +wasmparser = { workspace = true, features = ['simd'] } wasm-encoder = { workspace = true, features = ['wasmparser'] } rand = { workspace = true } log = { workspace = true } diff --git a/crates/wasmparser/src/arity.rs b/crates/wasmparser/src/arity.rs index 6c6faf0cb1..9f60d86ced 100644 --- a/crates/wasmparser/src/arity.rs +++ b/crates/wasmparser/src/arity.rs @@ -240,10 +240,10 @@ impl Operator<'_> { /// an impl ModuleArity, which stores the necessary module state. pub fn operator_arity(&self, module: &impl ModuleArity) -> Option<(u32, u32)> { macro_rules! define_arity { - ( $(@$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) )*) => ( + ($(@$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? 
=> $visit:ident ($($ann:tt)*) )*) => ( match self.clone() { $( - Self::$op $({ $($arg),* })? => { + Operator::$op $({ $($arg),* })? => { $( $(let _ = $arg;)* )? diff --git a/crates/wasmparser/src/lib.rs b/crates/wasmparser/src/lib.rs index aedef0cd1e..b40834b6a2 100644 --- a/crates/wasmparser/src/lib.rs +++ b/crates/wasmparser/src/lib.rs @@ -55,6 +55,18 @@ mod prelude { pub use crate::collections::{IndexMap, Map, Set}; } +/// A helper macro which is used to itself define further macros below. +/// +/// This is a little complicated, so first off sorry about that. The idea here +/// though is that there's one source of truth for the listing of instructions +/// in `wasmparser` and this is the one location. All other locations should be +/// derivative from this. As this one source of truth it has all instructions +/// from all proposals all grouped together. Down below though, for compile +/// time, currently the simd instructions are split out into their own macro. +/// The structure/syntax of this macro is to facilitate easily splitting out +/// entire groups of instructions. +/// +/// This is used below to define more macros. macro_rules! _for_each_operator_group { ($mac:ident) => { $mac! { @@ -769,54 +781,24 @@ macro_rules! _for_each_operator_group { }; } -#[cfg(feature = "simd")] -macro_rules! define_for_each_operator { - ( - $( - @$proposal:ident { - $( $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) )* - } - )* - ) => { - #[macro_export] - #[doc(hidden)] - macro_rules! _for_each_operator { - ($m:ident) => { - $m! { - $( - $( - @$proposal $op $({$($arg: $argty),*})? => $visit ($($ann)*) - )* - )* - } - } - } - }; -} -#[cfg(feature = "simd")] -_for_each_operator_group!(define_for_each_operator); - +/// Helper macro to define a `_for_each_visit_operator_impl` which receives +/// the syntax of each instruction individually, but only the non-simd +/// operators. macro_rules! 
define_for_each_non_simd_operator { + // Switch from `_for_each_operator_group` syntax to this macro's syntax to + // be a "tt muncher macro" (@ $($t:tt)*) => {define_for_each_non_simd_operator!(filter [] @ $($t)*);}; - ( - filter [$($t:tt)*] - @simd { - $( $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) )* - } - $($rest:tt)* - ) => { - define_for_each_non_simd_operator!(filter [$($t)*] $($rest)*); + // filter out simd/relaxed-simd proposals + (filter $filter:tt @simd $simd:tt $($rest:tt)*) => { + define_for_each_non_simd_operator!(filter $filter $($rest)*); }; - ( - filter [$($t:tt)*] - @relaxed_simd { - $( $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) )* - } - $($rest:tt)* - ) => { - define_for_each_non_simd_operator!(filter [$($t)*] $($rest)*); + (filter $filter:tt @relaxed_simd $simd:tt $($rest:tt)*) => { + define_for_each_non_simd_operator!(filter $filter $($rest)*); }; + + // For all other proposals add in tokens where the `@proposal` is prepended + // before each instruction. ( filter [$($t:tt)*] @$proposal:ident { @@ -833,22 +815,66 @@ macro_rules! define_for_each_non_simd_operator { ); }; + // At the end the `$t` list here is how we want to define + // `_for_each_visit_operator_impl`, so define the macro with these tokens. (filter [$($t:tt)*]) => { #[macro_export] #[doc(hidden)] - macro_rules! _for_each_non_simd_operator { + macro_rules! _for_each_visit_operator_impl { ($m:ident) => { $m! { $($t)* } } } + + // When simd is disabled then this macro is additionally the + // `for_each_operator!` macro implementation + #[cfg(not(feature = "simd"))] + #[doc(hidden)] + pub use _for_each_visit_operator_impl as _for_each_operator_impl; }; } _for_each_operator_group!(define_for_each_non_simd_operator); +/// When the simd feature is enabled then `_for_each_operator_impl` is defined +/// to be the same as the above `define_for_each_non_simd_operator` macro except +/// with all proposals thrown in. 
+#[cfg(feature = "simd")] +macro_rules! define_for_each_operator_impl_with_simd { + ( + $( + @$proposal:ident { + $( $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) )* + } + )* + ) => { + #[macro_export] + #[doc(hidden)] + macro_rules! _for_each_operator_impl { + ($m:ident) => { + $m! { + $( + $( + @$proposal $op $({$($arg: $argty),*})? => $visit ($($ann)*) + )* + )* + } + } + } + }; +} +#[cfg(feature = "simd")] +_for_each_operator_group!(define_for_each_operator_impl_with_simd); + +/// Helper macro to define the `_for_each_visit_simd_operator_impl` macro. +/// +/// This is basically the same as `define_for_each_non_simd_operator` above +/// except that it's filtering on different proposals. #[cfg(feature = "simd")] macro_rules! define_for_each_simd_operator { + // Switch to "tt muncher" mode (@ $($t:tt)*) => {define_for_each_simd_operator!(filter [] @ $($t)*);}; + // Collect the `@simd` and `@relaxed_simd` proposals. ( filter [$($t:tt)*] @simd { $( $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) )* } $($rest:tt)* ) => { @@ -879,20 +905,17 @@ macro_rules! define_for_each_simd_operator { $($rest)* ); }; - ( - filter [$($t:tt)*] - @$proposal:ident { - $( $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*) )* - } - $($rest:tt)* - ) => { - define_for_each_simd_operator!(filter [$($t)*] $($rest)*); + + // Skip all other proposals. + (filter $filter:tt @$proposal:ident $instrs:tt $($rest:tt)*) => { + define_for_each_simd_operator!(filter $filter $($rest)*); }; + // Base case to define the base macro. (filter [$($t:tt)*]) => { #[macro_export] #[doc(hidden)] - macro_rules! _for_each_simd_operator { + macro_rules! _for_each_visit_simd_operator_impl { ($m:ident) => { $m! { $($t)* } } @@ -902,14 +925,6 @@ macro_rules! 
define_for_each_simd_operator { #[cfg(feature = "simd")] _for_each_operator_group!(define_for_each_simd_operator); -#[cfg(not(feature = "simd"))] -#[doc(hidden)] -pub use _for_each_non_simd_operator as _for_each_operator_delegate; - -#[cfg(feature = "simd")] -#[doc(hidden)] -pub use _for_each_operator as _for_each_operator_delegate; - /// Used to implement routines for the [`Operator`] enum. /// /// A helper macro to conveniently iterate over all opcodes recognized by this @@ -1038,7 +1053,7 @@ pub use _for_each_operator as _for_each_operator_delegate; /// } /// ``` #[doc(inline)] -pub use _for_each_operator_delegate as for_each_operator; +pub use _for_each_operator_impl as for_each_operator; /// Used to implement the [`VisitOperator`] trait. /// @@ -1194,7 +1209,7 @@ pub use _for_each_operator_delegate as for_each_operator; /// } /// ``` #[doc(inline)] -pub use _for_each_non_simd_operator as for_each_visit_operator; +pub use _for_each_visit_operator_impl as for_each_visit_operator; /// Used to implement the [`VisitSimdOperator`] trait. /// @@ -1215,7 +1230,7 @@ pub use _for_each_non_simd_operator as for_each_visit_operator; /// [`VisitSimdOperator`]: crate::VisitSimdOperator #[cfg(feature = "simd")] #[doc(inline)] -pub use _for_each_simd_operator as for_each_visit_simd_operator; +pub use _for_each_visit_simd_operator_impl as for_each_visit_simd_operator; macro_rules! 
format_err { ($offset:expr, $($arg:tt)*) => { From d8fddc833303e38fd31c869f37b3bb5b92d3cd58 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Tue, 26 Nov 2024 14:34:33 -0800 Subject: [PATCH 83/83] A few more minor adjustments --- crates/wasmparser/src/lib.rs | 9 +++++++-- crates/wasmparser/src/readers/core/operators.rs | 16 +++++++--------- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/crates/wasmparser/src/lib.rs b/crates/wasmparser/src/lib.rs index b40834b6a2..7a7d8b027c 100644 --- a/crates/wasmparser/src/lib.rs +++ b/crates/wasmparser/src/lib.rs @@ -937,7 +937,7 @@ _for_each_operator_group!(define_for_each_simd_operator); /// /// This is an "iterator macro" where this macro is invoked with the name of /// another macro, and then that macro is invoked with the list of all -/// operators. An example invocation of this looks like: +/// operators. /// /// The list of specializable Wasm proposals is as follows: /// @@ -1067,7 +1067,7 @@ pub use _for_each_operator_impl as for_each_operator; /// /// This is an "iterator macro" where this macro is invoked with the name of /// another macro, and then that macro is invoked with the list of all -/// operators. An example invocation of this looks like: +/// operators. /// /// The list of specializable Wasm proposals is as follows: /// @@ -1083,6 +1083,11 @@ pub use _for_each_operator_impl as for_each_operator; /// - `@stack_switching`: [Wasm `stack-switching` proposal] /// - `@wide_arithmetic`: [Wasm `wide-arithmetic` proposal] /// +/// Note that this macro does not iterate over the SIMD-related proposals. Those +/// are provided in [`VisitSimdOperator`] and [`for_each_visit_simd_operator`]. +/// This macro only handles non-SIMD related operators and so users wanting to +/// handle the SIMD-class of operators need to use that trait/macro as well. 
+/// /// [Wasm `exception-handling` proposal]: /// https://github.com/WebAssembly/exception-handling /// diff --git a/crates/wasmparser/src/readers/core/operators.rs b/crates/wasmparser/src/readers/core/operators.rs index 48f81cba2d..80e71d3e5c 100644 --- a/crates/wasmparser/src/readers/core/operators.rs +++ b/crates/wasmparser/src/readers/core/operators.rs @@ -421,12 +421,10 @@ pub trait VisitOperator<'a> { macro_rules! visit_operator { ($(@$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => {{ match op { - $( Operator::$op $({ $($arg),* })? => return self.$visit($($($arg.clone()),*)?), )* + $( Operator::$op $({ $($arg),* })? => self.$visit($($($arg.clone()),*)?), )* #[cfg(feature = "simd")] - _ => {}, - }; - #[cfg(feature = "simd")] - _visit_simd_operator(self, op) + other => visit_simd_operator(self, other), + } }}; } crate::for_each_visit_operator!(visit_operator) @@ -465,15 +463,15 @@ pub trait VisitOperator<'a> { /// Special handler for visiting `simd` and `relaxed-simd` [`Operator`] variants. #[cfg(feature = "simd")] -fn _visit_simd_operator<'a, V>(visitor: &mut V, op: &Operator<'a>) -> V::Output +fn visit_simd_operator<'a, V>(visitor: &mut V, op: &Operator<'a>) -> V::Output where V: VisitOperator<'a> + ?Sized, { + let Some(simd_visitor) = visitor.simd_visitor() else { + panic!("missing SIMD visitor to visit operator: {op:?}") + }; macro_rules! visit_simd_operator { ($(@$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident ($($ann:tt)*))*) => {{ - let Some(simd_visitor) = visitor.simd_visitor() else { - panic!("missing SIMD visitor to visit operator: {op:?}") - }; match op { $( Operator::$op $({ $($arg),* })? => simd_visitor.$visit($($($arg.clone()),*)?), )* unexpected => unreachable!("unexpected non-SIMD operator: {unexpected:?}"),