v8/v8

[mips32] Delete mips32 from v8

Bug: v8:13206
Change-Id: Ifb5daeff2a1e91fd098bc5abe9f81339575636bf
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3837160
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Jakob Linke <jgruber@chromium.org>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Auto-Submit: Liu Yu <liuyu@loongson.cn>
Commit-Queue: Liu Yu <liuyu@loongson.cn>
Cr-Commit-Position: refs/heads/main@{#83148}
Author: Liu Yu
Date: 2022-09-08 19:39:11 +08:00
Committed by: V8 LUCI CQ
parent 36559d91ca
commit a26ca5ed14
117 changed files with 149 additions and 61418 deletions
Changed paths: BUILD.gn, LICENSE, gni/, include/, src/ (base, baseline,
builtins, codegen, common, compiler, deoptimizer, diagnostics, execution,
flags, heap, interpreter, libsampler, logging, objects, profiler, regexp,
runtime, snapshot, utils, wasm), test/, tools/

BUILD.gn (102 changes)

@@ -1119,49 +1119,11 @@ config("toolchain") {
}
}
# Mips64el/mipsel simulators.
if (target_is_simulator &&
(v8_current_cpu == "mipsel" || v8_current_cpu == "mips64el")) {
# Mips64el simulators.
if (target_is_simulator && v8_current_cpu == "mips64el") {
defines += [ "_MIPS_TARGET_SIMULATOR" ]
}
if (v8_current_cpu == "mipsel" || v8_current_cpu == "mips") {
defines += [ "V8_TARGET_ARCH_MIPS" ]
if (v8_can_use_fpu_instructions) {
defines += [ "CAN_USE_FPU_INSTRUCTIONS" ]
}
if (v8_use_mips_abi_hardfloat) {
defines += [
"__mips_hard_float=1",
"CAN_USE_FPU_INSTRUCTIONS",
]
} else {
defines += [ "__mips_soft_float=1" ]
}
if (mips_arch_variant == "r6") {
defines += [
"_MIPS_ARCH_MIPS32R6",
"FPU_MODE_FP64",
]
if (mips_use_msa) {
defines += [ "_MIPS_MSA" ]
}
} else if (mips_arch_variant == "r2") {
defines += [ "_MIPS_ARCH_MIPS32R2" ]
if (mips_fpu_mode == "fp64") {
defines += [ "FPU_MODE_FP64" ]
} else if (mips_fpu_mode == "fpxx") {
defines += [ "FPU_MODE_FPXX" ]
} else if (mips_fpu_mode == "fp32") {
defines += [ "FPU_MODE_FP32" ]
}
} else if (mips_arch_variant == "r1") {
defines += [ "FPU_MODE_FP32" ]
}
# TODO(infra): Add support for mips_arch_variant rx and loongson.
}
if (v8_current_cpu == "mips64el" || v8_current_cpu == "mips64") {
defines += [ "V8_TARGET_ARCH_MIPS64" ]
if (v8_can_use_fpu_instructions) {
@@ -1335,6 +1297,7 @@ config("toolchain") {
if (is_clang) {
cflags += [
"-Wmissing-field-initializers",
"-Wunreachable-code",
# Google3 enables this warning, so we should also enable it to find issues
# earlier. See https://reviews.llvm.org/D56731 for details about this
@@ -1345,11 +1308,6 @@ config("toolchain") {
"-Wno-shadow",
]
if (v8_current_cpu != "mips" && v8_current_cpu != "mipsel") {
# We exclude MIPS because the IsMipsArchVariant macro causes trouble.
cflags += [ "-Wunreachable-code" ]
}
if (v8_current_cpu == "x64" || v8_current_cpu == "arm64" ||
v8_current_cpu == "mips64el" || v8_current_cpu == "riscv64") {
cflags += [ "-Wshorten-64-to-32" ]
@@ -1609,8 +1567,7 @@ if (is_android && enable_java_templates) {
if (v8_use_external_startup_data) {
deps = [ "//v8" ]
renaming_sources = [ "$root_out_dir/snapshot_blob.bin" ]
if (current_cpu == "arm" || current_cpu == "x86" ||
current_cpu == "mipsel") {
if (current_cpu == "arm" || current_cpu == "x86") {
renaming_destinations = [ "snapshot_blob_32.bin" ]
} else {
renaming_destinations = [ "snapshot_blob_64.bin" ]
@@ -2340,8 +2297,7 @@ action("v8_dump_build_config") {
"v8_enable_cet_shadow_stack=$v8_enable_cet_shadow_stack",
]
if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel" ||
v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
args += [
"mips_arch_variant=\"$mips_arch_variant\"",
"mips_use_msa=$mips_use_msa",
@@ -2514,11 +2470,6 @@ v8_source_set("v8_initializers") {
### gcmole(arch:arm64) ###
"src/builtins/arm64/builtins-arm64.cc",
]
} else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") {
sources += [
### gcmole(arch:mipsel) ###
"src/builtins/mips/builtins-mips.cc",
]
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
sources += [
### gcmole(arch:mips64el) ###
@@ -3898,22 +3849,6 @@ v8_header_set("v8_internal_headers") {
if (is_win) {
sources += [ "src/diagnostics/unwinding-info-win64.h" ]
}
} else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") {
sources += [ ### gcmole(arch:mipsel) ###
"src/baseline/mips/baseline-assembler-mips-inl.h",
"src/baseline/mips/baseline-compiler-mips-inl.h",
"src/codegen/mips/assembler-mips-inl.h",
"src/codegen/mips/assembler-mips.h",
"src/codegen/mips/constants-mips.h",
"src/codegen/mips/macro-assembler-mips.h",
"src/codegen/mips/register-mips.h",
"src/codegen/mips/reglist-mips.h",
"src/compiler/backend/mips/instruction-codes-mips.h",
"src/execution/mips/frame-constants-mips.h",
"src/execution/mips/simulator-mips.h",
"src/regexp/mips/regexp-macro-assembler-mips.h",
"src/wasm/baseline/mips/liftoff-assembler-mips.h",
]
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
sources += [ ### gcmole(arch:mips64el) ###
"src/baseline/mips64/baseline-assembler-mips64-inl.h",
@@ -5004,23 +4939,6 @@ v8_source_set("v8_base_without_compiler") {
if (is_win) {
sources += [ "src/diagnostics/unwinding-info-win64.cc" ]
}
} else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") {
sources += [ ### gcmole(arch:mipsel) ###
"src/codegen/mips/assembler-mips.cc",
"src/codegen/mips/constants-mips.cc",
"src/codegen/mips/cpu-mips.cc",
"src/codegen/mips/interface-descriptors-mips-inl.h",
"src/codegen/mips/macro-assembler-mips.cc",
"src/compiler/backend/mips/code-generator-mips.cc",
"src/compiler/backend/mips/instruction-scheduler-mips.cc",
"src/compiler/backend/mips/instruction-selector-mips.cc",
"src/deoptimizer/mips/deoptimizer-mips.cc",
"src/diagnostics/mips/disasm-mips.cc",
"src/diagnostics/mips/unwinder-mips.cc",
"src/execution/mips/frame-constants-mips.cc",
"src/execution/mips/simulator-mips.cc",
"src/regexp/mips/regexp-macro-assembler-mips.cc",
]
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
sources += [ ### gcmole(arch:mips64el) ###
"src/codegen/mips64/assembler-mips64.cc",
@@ -5246,8 +5164,7 @@ v8_source_set("v8_base_without_compiler") {
# Platforms that don't have CAS support need to link atomic library
# to implement atomic memory access
if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel" ||
v8_current_cpu == "mips64" || v8_current_cpu == "mips64el" ||
if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el" ||
v8_current_cpu == "ppc" || v8_current_cpu == "ppc64" ||
v8_current_cpu == "s390" || v8_current_cpu == "s390x" ||
v8_current_cpu == "riscv64" || v8_current_cpu == "riscv32") {
@@ -5637,7 +5554,7 @@ v8_component("v8_libbase") {
data_deps += [ "//build/win:runtime_libs" ]
}
if (v8_current_cpu == "mips" || v8_current_cpu == "mips64") {
if (v8_current_cpu == "mips64") {
# Add runtime libs for mips.
data += [
"tools/mips_toolchain/sysroot/usr/lib/",
@@ -5645,8 +5562,7 @@ v8_component("v8_libbase") {
]
}
if (is_ubsan && (v8_current_cpu == "x86" || v8_current_cpu == "arm" ||
v8_current_cpu == "mips")) {
if (is_ubsan && (v8_current_cpu == "x86" || v8_current_cpu == "arm")) {
# Special UBSan 32-bit requirement.
sources += [ "src/base/ubsan.cc" ]
}
@@ -5826,8 +5742,6 @@ v8_source_set("v8_heap_base") {
sources += [ "src/heap/base/asm/ppc/push_registers_asm.cc" ]
} else if (current_cpu == "s390x") {
sources += [ "src/heap/base/asm/s390/push_registers_asm.cc" ]
} else if (current_cpu == "mipsel") {
sources += [ "src/heap/base/asm/mips/push_registers_asm.cc" ]
} else if (current_cpu == "mips64el") {
sources += [ "src/heap/base/asm/mips64/push_registers_asm.cc" ]
} else if (current_cpu == "loong64") {

@@ -15,8 +15,7 @@ are:
- Strongtalk assembler, the basis of the files assembler-arm-inl.h,
assembler-arm.cc, assembler-arm.h, assembler-ia32-inl.h,
assembler-ia32.cc, assembler-ia32.h, assembler-x64-inl.h,
assembler-x64.cc, assembler-x64.h, assembler-mips-inl.h,
assembler-mips.cc, assembler-mips.h, assembler.cc and assembler.h.
assembler-x64.cc, assembler-x64.h, assembler.cc and assembler.h.
This code is copyrighted by Sun Microsystems Inc. and released
under a 3-clause BSD license.

@@ -64,8 +64,7 @@ if (v8_snapshot_toolchain == "") {
current_cpu == "arm") {
# Trying to compile 32-bit arm on arm64. Good luck!
v8_snapshot_toolchain = current_toolchain
} else if (host_cpu == "x64" &&
(v8_current_cpu == "mips" || v8_current_cpu == "mips64")) {
} else if (host_cpu == "x64" && v8_current_cpu == "mips64") {
# We don't support snapshot generation for big-endian targets,
# therefore snapshots will need to be built using native mksnapshot
# in combination with qemu
@@ -96,8 +95,7 @@ if (v8_snapshot_toolchain == "") {
} else {
_cpus = "x64_v8_${v8_current_cpu}"
}
} else if (v8_current_cpu == "arm" || v8_current_cpu == "mipsel" ||
v8_current_cpu == "riscv32") {
} else if (v8_current_cpu == "arm" || v8_current_cpu == "riscv32") {
_cpus = "x86_v8_${v8_current_cpu}"
} else {
# This branch should not be reached; leave _cpus blank so the assert
@@ -122,7 +120,6 @@ assert(v8_snapshot_toolchain != "",
# avoid building v8_libbase on the host more than once. On mips with big endian,
# the snapshot toolchain is the target toolchain and, hence, can't be used.
v8_generator_toolchain = v8_snapshot_toolchain
if (host_cpu == "x64" &&
(v8_current_cpu == "mips" || v8_current_cpu == "mips64")) {
if (host_cpu == "x64" && v8_current_cpu == "mips64") {
v8_generator_toolchain = "//build/toolchain/linux:clang_x64"
}

@@ -199,8 +199,7 @@ if ((is_posix || is_fuchsia) &&
}
# On MIPS gcc_target_rpath and ldso_path might be needed for all builds.
if (target_cpu == "mipsel" || target_cpu == "mips64el" ||
target_cpu == "mips" || target_cpu == "mips64") {
if (target_cpu == "mips64el" || target_cpu == "mips64") {
v8_add_configs += [ "//build/config/gcc:rpath_for_built_shared_libraries" ]
}

@@ -17,10 +17,10 @@ struct CalleeSavedRegisters {
void* arm_r9;
void* arm_r10;
};
#elif V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC || \
V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_S390 || \
V8_TARGET_ARCH_LOONG64 || V8_TARGET_ARCH_RISCV32
#elif V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || \
V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_LOONG64 || \
V8_TARGET_ARCH_RISCV32
struct CalleeSavedRegisters {};
#else
#error Target architecture was not detected as supported by v8

@@ -653,9 +653,6 @@ V8 shared library set USING_V8_SHARED.
#elif defined(__mips64)
#define V8_HOST_ARCH_MIPS64 1
#define V8_HOST_ARCH_64_BIT 1
#elif defined(__MIPSEB__) || defined(__MIPSEL__)
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
#elif defined(__loongarch64)
#define V8_HOST_ARCH_LOONG64 1
#define V8_HOST_ARCH_64_BIT 1
@@ -691,10 +688,10 @@ V8 shared library set USING_V8_SHARED.
// The macros may be set externally. If not, detect in the same way as the host
// architecture, that is, target the native environment as presented by the
// compiler.
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && \
!V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64 && \
!V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390 && \
!V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64 && \
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && \
!V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_PPC && \
!V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390 && \
!V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64 && \
!V8_TARGET_ARCH_RISCV32
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
@@ -706,8 +703,6 @@ V8 shared library set USING_V8_SHARED.
#define V8_TARGET_ARCH_ARM 1
#elif defined(__mips64)
#define V8_TARGET_ARCH_MIPS64 1
#elif defined(__MIPSEB__) || defined(__MIPSEL__)
#define V8_TARGET_ARCH_MIPS 1
#elif defined(_ARCH_PPC64)
#define V8_TARGET_ARCH_PPC64 1
#elif defined(_ARCH_PPC)
@@ -785,9 +780,6 @@ V8 shared library set USING_V8_SHARED.
#if (V8_TARGET_ARCH_ARM64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_ARM64))
#error Target architecture arm64 is only supported on arm64 and x64 host
#endif
#if (V8_TARGET_ARCH_MIPS && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_MIPS))
#error Target architecture mips is only supported on mips and ia32 host
#endif
#if (V8_TARGET_ARCH_MIPS64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_MIPS64))
#error Target architecture mips64 is only supported on mips64 and x64 host
#endif
@@ -812,12 +804,6 @@ V8 shared library set USING_V8_SHARED.
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_LOONG64
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_MIPS
#if defined(__MIPSEB__)
#define V8_TARGET_BIG_ENDIAN 1
#else
#define V8_TARGET_LITTLE_ENDIAN 1
#endif
#elif V8_TARGET_ARCH_MIPS64
#if defined(__MIPSEB__) || defined(V8_TARGET_ARCH_MIPS64_BE)
#define V8_TARGET_BIG_ENDIAN 1

@@ -98,10 +98,10 @@
// do not support adding noexcept to default members.
// Disabled on MSVC because constructors of standard containers are not noexcept
// there.
#if ((!defined(V8_CC_GNU) && !defined(V8_CC_MSVC) && \
!defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64) && \
!defined(V8_TARGET_ARCH_PPC) && !defined(V8_TARGET_ARCH_PPC64) && \
!defined(V8_TARGET_ARCH_RISCV64) && !defined(V8_TARGET_ARCH_RISCV32)) || \
#if ((!defined(V8_CC_GNU) && !defined(V8_CC_MSVC) && \
!defined(V8_TARGET_ARCH_MIPS64) && !defined(V8_TARGET_ARCH_PPC) && \
!defined(V8_TARGET_ARCH_PPC64) && !defined(V8_TARGET_ARCH_RISCV64) && \
!defined(V8_TARGET_ARCH_RISCV32)) || \
(defined(__clang__) && __cplusplus > 201300L))
#define V8_NOEXCEPT noexcept
#else

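The V8_NOEXCEPT hunk above narrows the condition under which the macro expands to noexcept, since some toolchains reject noexcept on defaulted members. A minimal sketch of the pattern with a simplified feature test (the ZoneBuffer class is hypothetical, and the real macro in V8 also keys off V8_CC_GNU, V8_CC_MSVC, and the remaining target-architecture defines):

#if defined(__clang__) && __cplusplus > 201300L
#define V8_NOEXCEPT noexcept
#else
#define V8_NOEXCEPT  // Expands to nothing where unsupported.
#endif

// Hypothetical class: the defaulted move members stay well-formed
// whether or not the macro adds noexcept.
class ZoneBuffer {
 public:
  ZoneBuffer() = default;
  ZoneBuffer(ZoneBuffer&&) V8_NOEXCEPT = default;
  ZoneBuffer& operator=(ZoneBuffer&&) V8_NOEXCEPT = default;
};
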
@@ -89,8 +89,8 @@ static V8_INLINE void __cpuid(int cpu_info[4], int info_type) {
#endif // !V8_LIBC_MSVCRT
#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 || V8_HOST_ARCH_MIPS || \
V8_HOST_ARCH_MIPS64 || V8_HOST_ARCH_RISCV64
#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 || V8_HOST_ARCH_MIPS64 || \
V8_HOST_ARCH_RISCV64
#if V8_OS_LINUX
@@ -198,48 +198,6 @@ static uint32_t ReadELFHWCaps() {
#endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64
#if V8_HOST_ARCH_MIPS
int __detect_fp64_mode(void) {
double result = 0;
// Bit representation of (double)1 is 0x3FF0000000000000.
__asm__ volatile(
".set push\n\t"
".set noreorder\n\t"
".set oddspreg\n\t"
"lui $t0, 0x3FF0\n\t"
"ldc1 $f0, %0\n\t"
"mtc1 $t0, $f1\n\t"
"sdc1 $f0, %0\n\t"
".set pop\n\t"
: "+m"(result)
:
: "t0", "$f0", "$f1", "memory");
return !(result == 1);
}
int __detect_mips_arch_revision(void) {
// TODO(dusmil): Do the specific syscall as soon as it is implemented in the
// mips kernel.
uint32_t result = 0;
__asm__ volatile(
"move $v0, $zero\n\t"
// Encoding for "addi $v0, $v0, 1" on non-r6,
// which is the encoding for "bovc $v0, $v0, 1" on r6.
// Use machine code directly to avoid compilation errors with different
// toolchains and maintain compatibility.
".word 0x20420001\n\t"
"sw $v0, %0\n\t"
: "=m"(result)
:
: "v0", "memory");
// Result is 0 on r6 architectures, 1 on other architecture revisions.
// Fall back to the least common denominator, which is mips32 revision 1.
return result ? 1 : 6;
}
#endif // V8_HOST_ARCH_MIPS
// Extract the information exposed by the kernel via /proc/cpuinfo.
class CPUInfo final {
public:
@@ -359,7 +317,7 @@ static bool HasListItem(const char* list, const char* item) {
#endif // V8_OS_LINUX
#endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 ||
// V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64 || V8_HOST_ARCH_RISCV64
// V8_HOST_ARCH_MIPS64 || V8_HOST_ARCH_RISCV64
#if defined(V8_OS_STARBOARD)
@@ -742,7 +700,7 @@ CPU::CPU()
#endif // V8_OS_LINUX
#elif V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
#elif V8_HOST_ARCH_MIPS64
// Simple detection of FPU at runtime for Linux.
// It is based on /proc/cpuinfo, which reveals hardware configuration
@@ -756,10 +714,6 @@ CPU::CPU()
has_msa_ = HasListItem(ASEs, "msa");
delete[] cpu_model;
delete[] ASEs;
#ifdef V8_HOST_ARCH_MIPS
is_fp64_mode_ = __detect_fp64_mode();
architecture_ = __detect_mips_arch_revision();
#endif
#elif V8_HOST_ARCH_ARM64
#ifdef V8_OS_WIN

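The surviving mips64 branch keeps the /proc/cpuinfo-based feature detection that the deleted V8_HOST_ARCH_MIPS code shared; only the inline-assembly FP64 and arch-revision probes go away. A self-contained sketch of that detection approach, using standard streams instead of V8's CPUInfo and HasListItem helpers (HasCpuinfoToken is an illustrative name, not V8 API):

#include <fstream>
#include <string>

// Scans /proc/cpuinfo for a whitespace-separated feature token such as
// "msa" or "fpu"; V8's CPUInfo class performs the same scan with its
// own field parsing.
bool HasCpuinfoToken(const std::string& token) {
  std::ifstream cpuinfo("/proc/cpuinfo");
  std::string word;
  while (cpuinfo >> word) {
    if (word == token) return true;
  }
  return false;
}

// Mirrors the has_msa_ assignment in the hunk above:
//   bool has_msa = HasCpuinfoToken("msa");
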
@@ -153,7 +153,7 @@ static bool DoubleStrtod(Vector<const char> trimmed, int exponent,
// result is not accurate.
// We know that Windows32 with MSVC, unlike with MinGW32, uses 64 bits and is
// therefore accurate.
// Note that the ARM and MIPS simulators are compiled for 32bits. They
// Note that the ARM simulators are compiled for 32bits. They
// therefore exhibit the same problem.
USE(exact_powers_of_ten);
USE(kMaxExactDoubleIntegerDecimalDigits);

@@ -43,11 +43,6 @@
#elif defined(V8_HOST_ARCH_ARM64) || \
(defined(V8_HOST_ARCH_ARM) && __ARM_ARCH >= 6)
#define YIELD_PROCESSOR __asm__ __volatile__("yield")
#elif defined(V8_HOST_ARCH_MIPS)
// The MIPS32 docs state that the PAUSE instruction is a no-op on older
// architectures (first added in MIPS32r2). To avoid assembler errors when
// targeting pre-r2, we must encode the instruction manually.
#define YIELD_PROCESSOR __asm__ __volatile__(".word 0x00000140")
#elif defined(V8_HOST_ARCH_MIPS64EL) && __mips_isa_rev >= 2
// Don't bother using .word here since r2 is the lowest supported mips64
// that Chromium supports.

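YIELD_PROCESSOR, whose MIPS32 branch is deleted above, expands to a single pause-style hint used inside spin loops. A hedged usage sketch (TrySpinAcquire is illustrative, not from this patch; the fallback #define stands in for the architecture branches shown above):

#include <atomic>

#ifndef YIELD_PROCESSOR
#define YIELD_PROCESSOR  // Assumed fallback for unlisted hosts.
#endif

std::atomic<bool> locked{false};

// Attempts the lock up to max_spins times, hinting the core that it is
// spinning between attempts.
bool TrySpinAcquire(int max_spins) {
  for (int i = 0; i < max_spins; ++i) {
    bool expected = false;
    if (locked.compare_exchange_weak(expected, true,
                                     std::memory_order_acquire)) {
      return true;
    }
    YIELD_PROCESSOR;
  }
  return false;
}
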
@@ -36,8 +36,6 @@
#include "src/baseline/riscv/baseline-assembler-riscv-inl.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/baseline/mips64/baseline-assembler-mips64-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/baseline/mips/baseline-assembler-mips-inl.h"
#elif V8_TARGET_ARCH_LOONG64
#include "src/baseline/loong64/baseline-assembler-loong64-inl.h"
#else

@@ -53,8 +53,6 @@
#include "src/baseline/riscv/baseline-compiler-riscv-inl.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/baseline/mips64/baseline-compiler-mips64-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/baseline/mips/baseline-compiler-mips-inl.h"
#elif V8_TARGET_ARCH_LOONG64
#include "src/baseline/loong64/baseline-compiler-loong64-inl.h"
#else

@@ -1,573 +0,0 @@
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASELINE_MIPS_BASELINE_ASSEMBLER_MIPS_INL_H_
#define V8_BASELINE_MIPS_BASELINE_ASSEMBLER_MIPS_INL_H_
#include "src/baseline/baseline-assembler.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/mips/assembler-mips-inl.h"
#include "src/objects/literal-objects-inl.h"
namespace v8 {
namespace internal {
namespace baseline {
class BaselineAssembler::ScratchRegisterScope {
public:
explicit ScratchRegisterScope(BaselineAssembler* assembler)
: assembler_(assembler),
prev_scope_(assembler->scratch_register_scope_),
wrapped_scope_(assembler->masm()) {
if (!assembler_->scratch_register_scope_) {
// For the first scratch scope we open, add a couple of extra
// registers.
wrapped_scope_.Include({t4, t5, t6, t7});
}
assembler_->scratch_register_scope_ = this;
}
~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }
Register AcquireScratch() { return wrapped_scope_.Acquire(); }
private:
BaselineAssembler* assembler_;
ScratchRegisterScope* prev_scope_;
UseScratchRegisterScope wrapped_scope_;
};
enum class Condition : uint32_t {
kEqual = eq,
kNotEqual = ne,
kLessThan = lt,
kGreaterThan = gt,
kLessThanEqual = le,
kGreaterThanEqual = ge,
kUnsignedLessThan = Uless,
kUnsignedGreaterThan = Ugreater,
kUnsignedLessThanEqual = Uless_equal,
kUnsignedGreaterThanEqual = Ugreater_equal,
kOverflow = overflow,
kNoOverflow = no_overflow,
kZero = eq,
kNotZero = ne,
};
inline internal::Condition AsMasmCondition(Condition cond) {
// This is important for arm, where each internal::Condition value
// represents an encoded bit field value.
static_assert(sizeof(internal::Condition) == sizeof(Condition));
return static_cast<internal::Condition>(cond);
}
namespace detail {
#ifdef DEBUG
inline bool Clobbers(Register target, MemOperand op) {
return op.is_reg() && op.rm() == target;
}
#endif
} // namespace detail
#define __ masm_->
MemOperand BaselineAssembler::RegisterFrameOperand(
interpreter::Register interpreter_register) {
return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
}
void BaselineAssembler::RegisterFrameAddress(
interpreter::Register interpreter_register, Register rscratch) {
return __ Addu(rscratch, fp,
interpreter_register.ToOperand() * kSystemPointerSize);
}
MemOperand BaselineAssembler::FeedbackVectorOperand() {
return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
void BaselineAssembler::Bind(Label* label) { __ bind(label); }
void BaselineAssembler::JumpTarget() {
// NOP.
}
void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
__ Branch(target);
}
void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
Label* target, Label::Distance) {
__ JumpIfRoot(value, index, target);
}
void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
Label* target, Label::Distance) {
__ JumpIfNotRoot(value, index, target);
}
void BaselineAssembler::JumpIfSmi(Register value, Label* target,
Label::Distance) {
__ JumpIfSmi(value, target);
}
void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
Label::Distance) {
__ JumpIfNotSmi(value, target);
}
void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
Label* target,
Label::Distance distance) {
JumpIf(cc, left, Operand(right), target, distance);
}
void BaselineAssembler::CallBuiltin(Builtin builtin) {
ASM_CODE_COMMENT_STRING(masm_,
__ CommentForOffHeapTrampoline("call", builtin));
Register temp = t9;
__ LoadEntryFromBuiltin(builtin, temp);
__ Call(temp);
}
void BaselineAssembler::TailCallBuiltin(Builtin builtin) {
ASM_CODE_COMMENT_STRING(masm_,
__ CommentForOffHeapTrampoline("tail call", builtin));
Register temp = t9;
__ LoadEntryFromBuiltin(builtin, temp);
__ Jump(temp);
}
void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
Label* target, Label::Distance) {
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ And(scratch, value, Operand(mask));
__ Branch(target, AsMasmCondition(cc), scratch, Operand(zero_reg));
}
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
Label* target, Label::Distance) {
__ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs));
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
Register map, Label* target,
Label::Distance) {
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
__ GetObjectType(object, map, type);
__ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
}
void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
InstanceType instance_type,
Label* target, Label::Distance) {
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
if (v8_flags.debug_code) {
__ AssertNotSmi(map);
__ GetObjectType(map, type, type);
__ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
}
__ Lw(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
}
void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
MemOperand operand, Label* target,
Label::Distance) {
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ Lw(scratch, operand);
__ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
Label* target, Label::Distance) {
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ li(scratch, Operand(smi));
__ SmiUntag(scratch);
__ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
Label* target, Label::Distance) {
__ AssertSmi(lhs);
__ AssertSmi(rhs);
__ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs));
}
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
MemOperand operand, Label* target,
Label::Distance) {
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ Lw(scratch, operand);
__ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
}
void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
Register value, Label* target,
Label::Distance) {
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ Lw(scratch, operand);
__ Branch(target, AsMasmCondition(cc), scratch, Operand(value));
}
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
Label* target, Label::Distance) {
__ Branch(target, AsMasmCondition(cc), value, Operand(byte));
}
void BaselineAssembler::Move(interpreter::Register output, Register source) {
Move(RegisterFrameOperand(output), source);
}
void BaselineAssembler::Move(Register output, TaggedIndex value) {
__ li(output, Operand(value.ptr()));
}
void BaselineAssembler::Move(MemOperand output, Register source) {
__ Sw(source, output);
}
void BaselineAssembler::Move(Register output, ExternalReference reference) {
__ li(output, Operand(reference));
}
void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
__ li(output, Operand(value));
}
void BaselineAssembler::Move(Register output, int32_t value) {
__ li(output, Operand(value));
}
void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
__ Move(output, source);
}
void BaselineAssembler::MoveSmi(Register output, Register source) {
__ Move(output, source);
}
namespace detail {
template <typename Arg>
inline Register ToRegister(BaselineAssembler* basm,
BaselineAssembler::ScratchRegisterScope* scope,
Arg arg) {
Register reg = scope->AcquireScratch();
basm->Move(reg, arg);
return reg;
}
inline Register ToRegister(BaselineAssembler* basm,
BaselineAssembler::ScratchRegisterScope* scope,
Register reg) {
return reg;
}
template <typename... Args>
struct PushAllHelper;
template <>
struct PushAllHelper<> {
static int Push(BaselineAssembler* basm) { return 0; }
static int PushReverse(BaselineAssembler* basm) { return 0; }
};
// TODO(ishell): try to pack sequence of pushes into one instruction by
// looking at register codes. For example, Push(r1, r2, r5, r0, r3, r4)
// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
template <typename Arg>
struct PushAllHelper<Arg> {
static int Push(BaselineAssembler* basm, Arg arg) {
BaselineAssembler::ScratchRegisterScope scope(basm);
basm->masm()->Push(ToRegister(basm, &scope, arg));
return 1;
}
static int PushReverse(BaselineAssembler* basm, Arg arg) {
return Push(basm, arg);
}
};
// TODO(ishell): try to pack sequence of pushes into one instruction by
// looking at register codes. For example, Push(r1, r2, r5, r0, r3, r4)
// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
template <typename Arg, typename... Args>
struct PushAllHelper<Arg, Args...> {
static int Push(BaselineAssembler* basm, Arg arg, Args... args) {
PushAllHelper<Arg>::Push(basm, arg);
return 1 + PushAllHelper<Args...>::Push(basm, args...);
}
static int PushReverse(BaselineAssembler* basm, Arg arg, Args... args) {
int nargs = PushAllHelper<Args...>::PushReverse(basm, args...);
PushAllHelper<Arg>::Push(basm, arg);
return nargs + 1;
}
};
template <>
struct PushAllHelper<interpreter::RegisterList> {
static int Push(BaselineAssembler* basm, interpreter::RegisterList list) {
for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
}
return list.register_count();
}
static int PushReverse(BaselineAssembler* basm,
interpreter::RegisterList list) {
for (int reg_index = list.register_count() - 1; reg_index >= 0;
--reg_index) {
PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
}
return list.register_count();
}
};
template <typename... T>
struct PopAllHelper;
template <>
struct PopAllHelper<> {
static void Pop(BaselineAssembler* basm) {}
};
// TODO(ishell): try to pack sequence of pops into one instruction by
// looking at register codes. For example, Pop(r1, r2, r5, r0, r3, r4)
// could be generated as two pops: Pop(r1, r2, r5) and Pop(r0, r3, r4).
template <>
struct PopAllHelper<Register> {
static void Pop(BaselineAssembler* basm, Register reg) {
basm->masm()->Pop(reg);
}
};
template <typename... T>
struct PopAllHelper<Register, T...> {
static void Pop(BaselineAssembler* basm, Register reg, T... tail) {
PopAllHelper<Register>::Pop(basm, reg);
PopAllHelper<T...>::Pop(basm, tail...);
}
};
} // namespace detail
template <typename... T>
int BaselineAssembler::Push(T... vals) {
return detail::PushAllHelper<T...>::Push(this, vals...);
}
template <typename... T>
void BaselineAssembler::PushReverse(T... vals) {
detail::PushAllHelper<T...>::PushReverse(this, vals...);
}
template <typename... T>
void BaselineAssembler::Pop(T... registers) {
detail::PopAllHelper<T...>::Pop(this, registers...);
}
void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
int offset) {
__ Lw(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
int offset) {
__ Lw(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
Register source,
int offset) {
LoadTaggedSignedField(output, source, offset);
SmiUntag(output);
}
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ Lw(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
Register source, int offset) {
__ lhu(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadWord8Field(Register output, Register source,
int offset) {
__ lb(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
Smi value) {
ASM_CODE_COMMENT(masm_);
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ li(scratch, Operand(value));
__ Sw(scratch, FieldMemOperand(target, offset));
}
void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
int offset,
Register value) {
ASM_CODE_COMMENT(masm_);
__ Sw(value, FieldMemOperand(target, offset));
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ RecordWriteField(target, offset, value, scratch, kRAHasNotBeenSaved,
SaveFPRegsMode::kIgnore);
}
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
int offset,
Register value) {
__ Sw(value, FieldMemOperand(target, offset));
}
void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
Register feedback_vector,
FeedbackSlot slot,
Label* on_result,
Label::Distance) {
Label fallthrough;
LoadTaggedPointerField(scratch_and_result, feedback_vector,
FeedbackVector::OffsetOfElementAt(slot.ToInt()));
__ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
// Is it marked_for_deoptimization? If yes, clear the slot.
{
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ TestCodeTIsMarkedForDeoptimizationAndJump(scratch_and_result, scratch,
eq, on_result);
__ li(scratch, __ ClearedValue());
StoreTaggedFieldNoWriteBarrier(
feedback_vector, FeedbackVector::OffsetOfElementAt(slot.ToInt()),
scratch);
}
__ bind(&fallthrough);
Move(scratch_and_result, 0);
}
void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
int32_t weight, Label* skip_interrupt_label) {
ASM_CODE_COMMENT(masm_);
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
LoadTaggedPointerField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ Lw(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
__ Addu(interrupt_budget, interrupt_budget, weight);
__ Sw(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
if (skip_interrupt_label) {
DCHECK_LT(weight, 0);
__ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg));
}
}
void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
Register weight, Label* skip_interrupt_label) {
ASM_CODE_COMMENT(masm_);
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
LoadTaggedPointerField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ Lw(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
__ Addu(interrupt_budget, interrupt_budget, weight);
__ Sw(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
if (skip_interrupt_label)
__ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg));
}
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
}
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
__ Addu(lhs, lhs, Operand(rhs));
}
void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
__ And(output, lhs, Operand(rhs));
}
void BaselineAssembler::Switch(Register reg, int case_value_base,
Label** labels, int num_labels) {
ASM_CODE_COMMENT(masm_);
Label fallthrough;
if (case_value_base != 0) {
__ Subu(reg, reg, Operand(case_value_base));
}
__ Branch(&fallthrough, AsMasmCondition(Condition::kUnsignedGreaterThanEqual),
reg, Operand(num_labels));
__ GenerateSwitchTable(reg, num_labels,
[labels](size_t i) { return labels[i]; });
__ bind(&fallthrough);
}
#undef __
#define __ basm.
void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
ASM_CODE_COMMENT(masm);
BaselineAssembler basm(masm);
Register weight = BaselineLeaveFrameDescriptor::WeightRegister();
Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();
{
ASM_CODE_COMMENT_STRING(masm, "Update Interrupt Budget");
Label skip_interrupt_label;
__ AddToInterruptBudgetAndJumpIfNotExceeded(weight, &skip_interrupt_label);
__ masm()->SmiTag(params_size);
__ masm()->Push(params_size, kInterpreterAccumulatorRegister);
__ LoadContext(kContextRegister);
__ LoadFunction(kJSFunctionRegister);
__ masm()->Push(kJSFunctionRegister);
__ CallRuntime(Runtime::kBytecodeBudgetInterrupt_Sparkplug, 1);
__ masm()->Pop(params_size, kInterpreterAccumulatorRegister);
__ masm()->SmiUntag(params_size);
__ Bind(&skip_interrupt_label);
}
BaselineAssembler::ScratchRegisterScope temps(&basm);
Register actual_params_size = temps.AcquireScratch();
// Compute the size of the actual parameters + receiver (in bytes).
__ Move(actual_params_size,
MemOperand(fp, StandardFrameConstants::kArgCOffset));
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
Label corrected_args_count;
__ masm()->Branch(&corrected_args_count, ge, params_size,
Operand(actual_params_size));
__ masm()->Move(params_size, actual_params_size);
__ Bind(&corrected_args_count);
// Leave the frame (also dropping the register file).
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
__ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver);
__ masm()->Ret();
}
#undef __
inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
Register reg) {
assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue, reg,
Operand(kInterpreterAccumulatorRegister));
}
} // namespace baseline
} // namespace internal
} // namespace v8
#endif // V8_BASELINE_MIPS_BASELINE_ASSEMBLER_MIPS_INL_H_

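The PushAllHelper and PopAllHelper templates in the file deleted above peel one argument per specialization and recurse over the tail, returning the number of slots pushed. A freestanding sketch of that recursion with the assembler replaced by a plain vector so it compiles and runs anywhere (all names here are illustrative):

#include <vector>

std::vector<int> stack_slots;  // Stand-in for the machine stack.

template <typename... Args>
struct PushAllHelper;

template <>
struct PushAllHelper<> {
  // Base case: nothing left to push.
  static int Push() { return 0; }
};

template <typename Arg, typename... Args>
struct PushAllHelper<Arg, Args...> {
  static int Push(Arg arg, Args... args) {
    stack_slots.push_back(static_cast<int>(arg));      // Push the head.
    return 1 + PushAllHelper<Args...>::Push(args...);  // Recurse on the tail.
  }
};

template <typename... Args>
int PushAll(Args... vals) {  // Mirrors BaselineAssembler::Push.
  return PushAllHelper<Args...>::Push(vals...);
}

// PushAll(1, 2, 3) pushes in argument order and returns 3; the deleted
// PushReverse recursed over the tail first to invert the order.
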
@@ -1,78 +0,0 @@
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASELINE_MIPS_BASELINE_COMPILER_MIPS_INL_H_
#define V8_BASELINE_MIPS_BASELINE_COMPILER_MIPS_INL_H_
#include "src/base/logging.h"
#include "src/baseline/baseline-compiler.h"
namespace v8 {
namespace internal {
namespace baseline {
#define __ basm_.
void BaselineCompiler::Prologue() {
ASM_CODE_COMMENT(&masm_);
__ masm()->EnterFrame(StackFrame::BASELINE);
DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
int max_frame_size =
bytecode_->frame_size() + max_call_args_ * kSystemPointerSize;
CallBuiltin<Builtin::kBaselineOutOfLinePrologue>(
kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
PrologueFillFrame();
}
void BaselineCompiler::PrologueFillFrame() {
ASM_CODE_COMMENT(&masm_);
// Inlined register frame fill
interpreter::Register new_target_or_generator_register =
bytecode_->incoming_new_target_or_generator_register();
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
int register_count = bytecode_->register_count();
// Magic value
const int kLoopUnrollSize = 8;
const int new_target_index = new_target_or_generator_register.index();
const bool has_new_target = new_target_index != kMaxInt;
if (has_new_target) {
DCHECK_LE(new_target_index, register_count);
__ masm()->Addu(sp, sp, Operand(-(kPointerSize * new_target_index)));
for (int i = 0; i < new_target_index; i++) {
__ masm()->Sw(kInterpreterAccumulatorRegister, MemOperand(sp, i * 4));
}
// Push new_target_or_generator.
__ Push(kJavaScriptCallNewTargetRegister);
register_count -= new_target_index + 1;
}
if (register_count < 2 * kLoopUnrollSize) {
// If the frame is small enough, just unroll the frame fill completely.
__ masm()->Addu(sp, sp, Operand(-(kPointerSize * register_count)));
for (int i = 0; i < register_count; ++i) {
__ masm()->Sw(kInterpreterAccumulatorRegister, MemOperand(sp, i * 4));
}
} else {
__ masm()->Addu(sp, sp, Operand(-(kPointerSize * register_count)));
for (int i = 0; i < register_count; ++i) {
__ masm()->Sw(kInterpreterAccumulatorRegister, MemOperand(sp, i * 4));
}
}
}
void BaselineCompiler::VerifyFrameSize() {
ASM_CODE_COMMENT(&masm_);
__ masm()->Addu(kScratchReg, sp,
Operand(InterpreterFrameConstants::kFixedFrameSizeFromFp +
bytecode_->frame_size()));
__ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer, kScratchReg,
Operand(fp));
}
} // namespace baseline
} // namespace internal
} // namespace v8
#endif // V8_BASELINE_MIPS_BASELINE_COMPILER_MIPS_INL_H_

@@ -1270,11 +1270,11 @@ void Builtins::Generate_CEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit(
Generate_CEntry(masm, 2, SaveFPRegsMode::kSave, ArgvMode::kStack, true);
}
#if !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS)
#if !defined(V8_TARGET_ARCH_ARM)
void Builtins::Generate_MemCopyUint8Uint8(MacroAssembler* masm) {
masm->Call(BUILTIN_CODE(masm->isolate(), Illegal), RelocInfo::CODE_TARGET);
}
#endif // !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS)
#endif // !defined(V8_TARGET_ARCH_ARM)
#ifndef V8_TARGET_ARCH_IA32
void Builtins::Generate_MemMove(MacroAssembler* masm) {

@@ -251,16 +251,6 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
BIND(&u32);
Return(ChangeUint32ToTagged(AtomicLoad<Uint32T>(
AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 2))));
#if (V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6)
BIND(&i64);
Goto(&u64);
BIND(&u64);
{
TNode<Number> index_number = ChangeUintPtrToTagged(index_word);
Return(CallRuntime(Runtime::kAtomicsLoad64, context, array, index_number));
}
#else
BIND(&i64);
Return(BigIntFromSigned64(AtomicLoad64<AtomicInt64>(
AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 3))));
@@ -268,7 +258,6 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
BIND(&u64);
Return(BigIntFromUnsigned64(AtomicLoad64<AtomicUint64>(
AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 3))));
#endif //(V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6)
// This shouldn't happen, we've already validated the type.
BIND(&other);
@@ -358,11 +347,6 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
Return(value_integer);
BIND(&u64);
#if V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6
TNode<Number> index_number = ChangeUintPtrToTagged(index_word);
Return(CallRuntime(Runtime::kAtomicsStore64, context, array, index_number,
value));
#else
// 4. If arrayTypeName is "BigUint64Array" or "BigInt64Array",
// let v be ? ToBigInt(value).
TNode<BigInt> value_bigint = ToBigInt(context, value);
@@ -379,7 +363,6 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
AtomicStore64(AtomicMemoryOrder::kSeqCst, backing_store,
WordShl(index_word, 3), var_low.value(), high);
Return(value_bigint);
#endif
// This shouldn't happen, we've already validated the type.
BIND(&other);
@@ -423,7 +406,7 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
TNode<UintPtrT> index_word =
ValidateAtomicAccess(array, index_or_field_name, context);
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
#if V8_TARGET_ARCH_MIPS64
TNode<Number> index_number = ChangeUintPtrToTagged(index_word);
Return(CallRuntime(Runtime::kAtomicsExchange, context, array, index_number,
value));
@@ -523,7 +506,7 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
// This shouldn't happen, we've already validated the type.
BIND(&other);
Unreachable();
#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
#endif // V8_TARGET_ARCH_MIPS64
BIND(&detached_or_out_of_bounds);
{
@@ -558,7 +541,7 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
// 2. Let i be ? ValidateAtomicAccess(typedArray, index).
TNode<UintPtrT> index_word = ValidateAtomicAccess(array, index, context);
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
#if V8_TARGET_ARCH_MIPS64
TNode<Number> index_number = ChangeUintPtrToTagged(index_word);
Return(CallRuntime(Runtime::kAtomicsCompareExchange, context, array,
index_number, old_value, new_value));
@@ -677,7 +660,7 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
// This shouldn't happen, we've already validated the type.
BIND(&other);
Unreachable();
#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
#endif // V8_TARGET_ARCH_MIPS64
BIND(&detached_or_out_of_bounds);
{
@@ -728,7 +711,7 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
// 2. Let i be ? ValidateAtomicAccess(typedArray, index).
TNode<UintPtrT> index_word = ValidateAtomicAccess(array, index, context);
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
#if V8_TARGET_ARCH_MIPS64
TNode<Number> index_number = ChangeUintPtrToTagged(index_word);
Return(CallRuntime(runtime_function, context, array, index_number, value));
#else
@@ -818,7 +801,7 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
// This shouldn't happen, we've already validated the type.
BIND(&other);
Unreachable();
#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
#endif // V8_TARGET_ARCH_MIPS64
BIND(&detached_or_out_of_bounds);
ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);

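The sharedarraybuffer hunks above all trim the same pattern: targets whose codegen lacks suitable inline 64-bit atomics route BigInt64/BigUint64 operations through a runtime call, and the mips32 branches of that fallback are now gone. A schematic C++ analogue of the dispatch (the feature macro and RuntimeAtomicLoad64 are placeholders, not V8 code):

#include <atomic>
#include <cstdint>

// Out-of-line slow path, analogous to Runtime::kAtomicsLoad64; a 32-bit
// port would implement it with a lock or a kernel helper.
uint64_t RuntimeAtomicLoad64(const std::atomic<uint64_t>* p);

uint64_t AtomicLoad64(const std::atomic<uint64_t>* p) {
#if defined(HAVE_NATIVE_64BIT_ATOMICS)  // Placeholder feature macro.
  return p->load(std::memory_order_seq_cst);  // Inline fast path.
#else
  return RuntimeAtomicLoad64(p);  // Runtime-call fallback, as on mips32.
#endif
}
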
@@ -576,14 +576,14 @@ bool Builtins::CodeObjectIsExecutable(Builtin builtin) {
case Builtin::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit:
return true;
default:
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
#if V8_TARGET_ARCH_MIPS64
// TODO(Loongson): Moving non-JS linkage builtins code objects into RO_SPACE
// caused the MIPS platform to crash, and we need some time to handle it. For
// now, disable this change temporarily on the MIPS platform.
return true;
#else
return false;
#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
#endif // V8_TARGET_ARCH_MIPS64
}
}

File diff suppressed because it is too large.

@@ -17,8 +17,6 @@
#include "src/codegen/arm/assembler-arm.h"
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/codegen/ppc/assembler-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/codegen/mips/assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/assembler-mips64.h"
#elif V8_TARGET_ARCH_LOONG64

@@ -17,8 +17,6 @@
#include "src/codegen/arm/assembler-arm-inl.h"
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/codegen/ppc/assembler-ppc-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/codegen/mips/assembler-mips-inl.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/assembler-mips64-inl.h"
#elif V8_TARGET_ARCH_LOONG64

@@ -271,8 +271,7 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
int pc_offset() const { return static_cast<int>(pc_ - buffer_start_); }
int pc_offset_for_safepoint() {
#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
defined(V8_TARGET_ARCH_LOONG64)
#if defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_LOONG64)
// MIPS and LOONG need to use their own implementation to avoid the
// trampoline's influence.
UNREACHABLE();

@@ -11,8 +11,6 @@
#include "src/codegen/arm64/constants-arm64.h"
#elif V8_TARGET_ARCH_IA32
#include "src/codegen/ia32/constants-ia32.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/codegen/mips/constants-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/constants-mips64.h"
#elif V8_TARGET_ARCH_LOONG64

@@ -44,7 +44,7 @@ enum CpuFeature {
#elif V8_TARGET_ARCH_ARM64
JSCVT,
#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
#elif V8_TARGET_ARCH_MIPS64
FPU,
FP64FPU,
MIPSr1,

@@ -766,8 +766,6 @@ ExternalReference ExternalReference::invoke_accessor_getter_callback() {
#define re_stack_check_func RegExpMacroAssemblerARM::CheckStackGuardState
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#define re_stack_check_func RegExpMacroAssemblerPPC::CheckStackGuardState
#elif V8_TARGET_ARCH_MIPS
#define re_stack_check_func RegExpMacroAssemblerMIPS::CheckStackGuardState
#elif V8_TARGET_ARCH_MIPS64
#define re_stack_check_func RegExpMacroAssemblerMIPS::CheckStackGuardState
#elif V8_TARGET_ARCH_LOONG64

@@ -25,8 +25,6 @@
#include "src/codegen/s390/interface-descriptors-s390-inl.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/interface-descriptors-mips64-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/codegen/mips/interface-descriptors-mips-inl.h"
#elif V8_TARGET_ARCH_LOONG64
#include "src/codegen/loong64/interface-descriptors-loong64-inl.h"
#elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
@@ -233,7 +231,7 @@ constexpr RegList WriteBarrierDescriptor::ComputeSavedRegisters(
saved_registers.set(SlotAddressRegister());
}
#elif V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_LOONG64 || \
V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_MIPS
V8_TARGET_ARCH_MIPS64
if (object != ObjectRegister()) saved_registers.set(ObjectRegister());
// The slot address is always clobbered.
saved_registers.set(SlotAddressRegister());
@@ -333,9 +331,9 @@ constexpr auto LoadWithReceiverBaselineDescriptor::registers() {
// static
constexpr auto BaselineOutOfLinePrologueDescriptor::registers() {
// TODO(v8:11421): Implement on other platforms.
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM || \
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390 || \
V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_MIPS || \
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM || \
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390 || \
V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
V8_TARGET_ARCH_LOONG64 || V8_TARGET_ARCH_RISCV32
return RegisterArray(
kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
@@ -357,7 +355,7 @@ constexpr auto BaselineLeaveFrameDescriptor::registers() {
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || \
V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_LOONG64 || V8_TARGET_ARCH_RISCV32
V8_TARGET_ARCH_LOONG64 || V8_TARGET_ARCH_RISCV32
return RegisterArray(ParamsSizeRegister(), WeightRegister());
#else
return DefaultRegisterArray();

@@ -118,7 +118,7 @@ const char* CallInterfaceDescriptor::DebugName() const {
}
bool CallInterfaceDescriptor::IsValidFloatParameterRegister(Register reg) {
#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)
#if defined(V8_TARGET_ARCH_MIPS64)
return reg.code() % 2 == 0;
#else
return true;

@@ -51,9 +51,6 @@ enum class SmiCheck { kOmit, kInline };
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/codegen/ppc/constants-ppc.h"
#include "src/codegen/ppc/macro-assembler-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/codegen/mips/constants-mips.h"
#include "src/codegen/mips/macro-assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/constants-mips64.h"
#include "src/codegen/mips64/macro-assembler-mips64.h"

@@ -1,355 +0,0 @@
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
#ifndef V8_CODEGEN_MIPS_ASSEMBLER_MIPS_INL_H_
#define V8_CODEGEN_MIPS_ASSEMBLER_MIPS_INL_H_
#include "src/codegen/mips/assembler-mips.h"
#include "src/codegen/assembler.h"
#include "src/debug/debug.h"
#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
bool CpuFeatures::SupportsOptimizer() { return IsSupported(FPU); }
// -----------------------------------------------------------------------------
// Operand and MemOperand.
bool Operand::is_reg() const { return rm_.is_valid(); }
int32_t Operand::immediate() const {
DCHECK(!is_reg());
DCHECK(!IsHeapObjectRequest());
return value_.immediate;
}
// -----------------------------------------------------------------------------
// RelocInfo.
void RelocInfo::apply(intptr_t delta) {
if (IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_)) {
// Absolute code pointer inside code object moves with the code object.
Assembler::RelocateInternalReference(rmode_, pc_, delta);
} else if (IsRelativeCodeTarget(rmode_)) {
Assembler::RelocateRelativeReference(rmode_, pc_, delta);
}
}
Address RelocInfo::target_address() {
DCHECK(IsCodeTargetMode(rmode_) || IsRuntimeEntry(rmode_) ||
IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
}
Address RelocInfo::target_address_address() {
DCHECK(HasTargetAddressAddress());
// Read the address of the word containing the target_address in an
// instruction stream.
// The only architecture-independent user of this function is the serializer.
// The serializer uses it to find out how many raw bytes of instruction to
// output before the next target.
// For an instruction like LUI/ORI where the target bits are mixed into the
// instruction bits, the size of the target will be zero, indicating that the
// serializer should not step forward in memory after a target is resolved
// and written. In this case the target_address_address function should
// return the end of the instructions to be patched, allowing the
// deserializer to deserialize the instructions as raw bytes and put them in
// place, ready to be patched with the target. After jump optimization,
// that is the address of the instruction that follows J/JAL/JR/JALR
// instruction.
if (IsMipsArchVariant(kMips32r6)) {
// On R6 we don't move to the end of the instructions to be patched, but one
// instruction before, because if these instructions are at the end of the
// code object it can cause errors in the deserializer.
return pc_ + (Assembler::kInstructionsFor32BitConstant - 1) * kInstrSize;
} else {
return pc_ + Assembler::kInstructionsFor32BitConstant * kInstrSize;
}
}
Address RelocInfo::constant_pool_entry_address() { UNREACHABLE(); }
int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
void Assembler::deserialization_set_special_target_at(
Address instruction_payload, Code code, Address target) {
set_target_address_at(instruction_payload,
!code.is_null() ? code.constant_pool() : kNullAddress,
target);
}
int Assembler::deserialization_special_target_size(
Address instruction_payload) {
return kSpecialTargetSize;
}
void Assembler::set_target_internal_reference_encoded_at(Address pc,
Address target) {
Instr instr1 = Assembler::instr_at(pc + 0 * kInstrSize);
Instr instr2 = Assembler::instr_at(pc + 1 * kInstrSize);
DCHECK(Assembler::IsLui(instr1));
DCHECK(Assembler::IsOri(instr2) || Assembler::IsJicOrJialc(instr2));
instr1 &= ~kImm16Mask;
instr2 &= ~kImm16Mask;
int32_t imm = static_cast<int32_t>(target);
DCHECK_EQ(imm & 3, 0);
if (Assembler::IsJicOrJialc(instr2)) {
// Encoded internal references are lui/jic load of 32-bit absolute address.
uint32_t lui_offset_u, jic_offset_u;
Assembler::UnpackTargetAddressUnsigned(imm, &lui_offset_u, &jic_offset_u);
Assembler::instr_at_put(pc + 0 * kInstrSize, instr1 | lui_offset_u);
Assembler::instr_at_put(pc + 1 * kInstrSize, instr2 | jic_offset_u);
} else {
// Encoded internal references are lui/ori load of 32-bit absolute address.
PatchLuiOriImmediate(pc, imm, instr1, 0 * kInstrSize, instr2,
1 * kInstrSize);
}
// Currently used only by deserializer, and all code will be flushed
// after complete deserialization, no need to flush on each reference.
}
void Assembler::deserialization_set_target_internal_reference_at(
Address pc, Address target, RelocInfo::Mode mode) {
if (RelocInfo::IsInternalReferenceEncoded(mode)) {
DCHECK(IsLui(instr_at(pc)));
set_target_internal_reference_encoded_at(pc, target);
} else {
DCHECK(RelocInfo::IsInternalReference(mode));
Memory<Address>(pc) = target;
}
}
HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_) ||
IsDataEmbeddedObject(rmode_));
if (IsDataEmbeddedObject(rmode_)) {
return HeapObject::cast(Object(ReadUnalignedValue<Address>(pc_)));
}
return HeapObject::cast(
Object(Assembler::target_address_at(pc_, constant_pool_)));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
if (IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_)) {
return Handle<HeapObject>(reinterpret_cast<Address*>(
Assembler::target_address_at(pc_, constant_pool_)));
} else if (IsDataEmbeddedObject(rmode_)) {
return Handle<HeapObject>::cast(ReadUnalignedValue<Handle<Object>>(pc_));
}
DCHECK(IsRelativeCodeTarget(rmode_));
return origin->relative_code_target_object_handle_at(pc_);
}
void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_) ||
IsDataEmbeddedObject(rmode_));
if (IsDataEmbeddedObject(rmode_)) {
WriteUnalignedValue(pc_, target.ptr());
// No need to flush icache since no instructions were changed.
} else {
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
!v8_flags.disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
}
}
Address RelocInfo::target_external_reference() {
DCHECK(IsExternalReference(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
}
void RelocInfo::set_target_external_reference(
Address target, ICacheFlushMode icache_flush_mode) {
DCHECK(IsExternalReference(rmode_));
Assembler::set_target_address_at(pc_, constant_pool_, target,
icache_flush_mode);
}
Address RelocInfo::target_internal_reference() {
if (IsInternalReference(rmode_)) {
return Memory<Address>(pc_);
} else {
    // Encoded internal references are a lui/ori or lui/jic load of a 32-bit
    // absolute address.
DCHECK(IsInternalReferenceEncoded(rmode_));
Instr instr1 = Assembler::instr_at(pc_ + 0 * kInstrSize);
Instr instr2 = Assembler::instr_at(pc_ + 1 * kInstrSize);
DCHECK(Assembler::IsLui(instr1));
DCHECK(Assembler::IsOri(instr2) || Assembler::IsJicOrJialc(instr2));
if (Assembler::IsJicOrJialc(instr2)) {
return static_cast<Address>(
Assembler::CreateTargetAddress(instr1, instr2));
}
return static_cast<Address>(Assembler::GetLuiOriImmediate(instr1, instr2));
}
}
Address RelocInfo::target_internal_reference_address() {
DCHECK(IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
return pc_;
}
Builtin RelocInfo::target_builtin_at(Assembler* origin) { UNREACHABLE(); }
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
return target_address();
}
void RelocInfo::set_target_runtime_entry(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsRuntimeEntry(rmode_));
if (target_address() != target)
set_target_address(target, write_barrier_mode, icache_flush_mode);
}
Address RelocInfo::target_off_heap_target() {
DCHECK(IsOffHeapTarget(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
}
void RelocInfo::WipeOut() {
DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_) ||
IsOffHeapTarget(rmode_));
if (IsInternalReference(rmode_)) {
Memory<Address>(pc_) = kNullAddress;
} else if (IsInternalReferenceEncoded(rmode_)) {
Assembler::set_target_internal_reference_encoded_at(pc_, kNullAddress);
} else {
Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress);
}
}
Handle<Code> Assembler::relative_code_target_object_handle_at(
Address pc) const {
Instr instr1 = instr_at(pc);
Instr instr2 = instr_at(pc + kInstrSize);
DCHECK(IsLui(instr1));
DCHECK(IsOri(instr2) || IsNal(instr2));
DCHECK(IsNal(instr2) || IsNal(instr_at(pc - kInstrSize)));
if (IsNal(instr2)) {
instr2 = instr_at(pc + 2 * kInstrSize);
}
// Interpret 2 instructions generated by li (lui/ori).
int code_target_index = GetLuiOriImmediate(instr1, instr2);
return GetCodeTarget(code_target_index);
}
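// Note that the lui/ori pair here carries an index into the assembler's
// code-target table, not an absolute address: a patched pair equivalent to
// lui(t9, 0) / ori(t9, t9, 7) would decode to index 7 and resolve via
// GetCodeTarget(7) (register and index are illustrative). In the nal form,
// a nal sits between the two halves to capture a pc-relative return address
// in ra, which is why the ori is fetched one instruction later.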
// -----------------------------------------------------------------------------
// Assembler.
void Assembler::CheckBuffer() {
if (buffer_space() <= kGap) {
GrowBuffer();
}
}
void Assembler::CheckForEmitInForbiddenSlot() {
if (!is_buffer_growth_blocked()) {
CheckBuffer();
}
if (IsPrevInstrCompactBranch()) {
// Nop instruction to precede a CTI in forbidden slot:
Instr nop = SPECIAL | SLL;
*reinterpret_cast<Instr*>(pc_) = nop;
pc_ += kInstrSize;
ClearCompactBranchState();
}
}
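// The filler above is the canonical MIPS nop: SPECIAL and SLL are both zero,
// so SPECIAL | SLL with all register/shift fields clear encodes
// "sll zero_reg, zero_reg, 0", i.e. the instruction word 0x00000000. It is
// needed because r6 compact branches have a forbidden slot: the immediately
// following instruction must not be another control-transfer instruction,
// so one nop of padding restores that invariant.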
void Assembler::EmitHelper(Instr x, CompactBranchType is_compact_branch) {
if (IsPrevInstrCompactBranch()) {
if (Instruction::IsForbiddenAfterBranchInstr(x)) {
// Nop instruction to precede a CTI in forbidden slot:
Instr nop = SPECIAL | SLL;
*reinterpret_cast<Instr*>(pc_) = nop;
pc_ += kInstrSize;
}
ClearCompactBranchState();
}
*reinterpret_cast<Instr*>(pc_) = x;
pc_ += kInstrSize;
if (is_compact_branch == CompactBranchType::COMPACT_BRANCH) {
EmittedCompactBranchInstruction();
}
CheckTrampolinePoolQuick();
}
template <>
inline void Assembler::EmitHelper(uint8_t x);
template <typename T>
void Assembler::EmitHelper(T x) {
*reinterpret_cast<T*>(pc_) = x;
pc_ += sizeof(x);
CheckTrampolinePoolQuick();
}
template <>
void Assembler::EmitHelper(uint8_t x) {
*reinterpret_cast<uint8_t*>(pc_) = x;
pc_ += sizeof(x);
if (reinterpret_cast<intptr_t>(pc_) % kInstrSize == 0) {
CheckTrampolinePoolQuick();
}
}
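// The alignment guard above exists because single bytes (e.g. emitted via
// db()) can leave pc_ in the middle of an instruction word; emitting a
// trampoline pool there would split the data. Assuming the usual kInstrSize
// of 4:
//
//   pc_ % 4 == 0  -> back on an instruction boundary, safe to check
//   pc_ % 4 != 0  -> mid-word, defer the trampoline check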
void Assembler::emit(Instr x, CompactBranchType is_compact_branch) {
if (!is_buffer_growth_blocked()) {
CheckBuffer();
}
EmitHelper(x, is_compact_branch);
}
EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_MIPS_ASSEMBLER_MIPS_INL_H_

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -1,144 +0,0 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_MIPS
#include "src/codegen/mips/constants-mips.h"
namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
// Registers.
// These register names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
const char* Registers::names_[kNumSimuRegisters] = {
"zero_reg", "at", "v0", "v1", "a0", "a1", "a2", "a3", "t0",
"t1", "t2", "t3", "t4", "t5", "t6", "t7", "s0", "s1",
"s2", "s3", "s4", "s5", "s6", "s7", "t8", "t9", "k0",
"k1", "gp", "sp", "fp", "ra", "LO", "HI", "pc"};
// List of alias names which can be used when referring to MIPS registers.
const Registers::RegisterAlias Registers::aliases_[] = {
{0, "zero"},
{23, "cp"},
{30, "s8"},
{30, "s8_fp"},
{kInvalidRegister, nullptr}};
const char* Registers::Name(int reg) {
const char* result;
if ((0 <= reg) && (reg < kNumSimuRegisters)) {
result = names_[reg];
} else {
result = "noreg";
}
return result;
}
int Registers::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumSimuRegisters; i++) {
if (strcmp(names_[i], name) == 0) {
return i;
}
}
// Look through the alias names.
int i = 0;
while (aliases_[i].reg != kInvalidRegister) {
if (strcmp(aliases_[i].name, name) == 0) {
return aliases_[i].reg;
}
i++;
}
  // No register with the requested name found.
return kInvalidRegister;
}
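// Example lookups against the tables above (a sketch; values follow the
// declaration order of names_ and aliases_):
//
//   Registers::Number("sp")     // -> 29, via the canonical names_ table
//   Registers::Number("zero")   // -> 0, via the aliases_ table
//   Registers::Number("bogus")  // -> kInvalidRegister
//   Registers::Name(29)         // -> "sp"; out-of-range codes give "noreg"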
const char* FPURegisters::names_[kNumFPURegisters] = {
"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10",
"f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
"f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"};
// List of alias names which can be used when referring to MIPS registers.
const FPURegisters::RegisterAlias FPURegisters::aliases_[] = {
{kInvalidRegister, nullptr}};
const char* FPURegisters::Name(int creg) {
const char* result;
if ((0 <= creg) && (creg < kNumFPURegisters)) {
result = names_[creg];
} else {
result = "nocreg";
}
return result;
}
int FPURegisters::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumFPURegisters; i++) {
if (strcmp(names_[i], name) == 0) {
return i;
}
}
// Look through the alias names.
int i = 0;
while (aliases_[i].creg != kInvalidRegister) {
if (strcmp(aliases_[i].name, name) == 0) {
return aliases_[i].creg;
}
i++;
}
  // No Cregister with the requested name found.
return kInvalidFPURegister;
}
const char* MSARegisters::names_[kNumMSARegisters] = {
"w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10",
"w11", "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21",
"w22", "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "w31"};
const MSARegisters::RegisterAlias MSARegisters::aliases_[] = {
{kInvalidRegister, nullptr}};
const char* MSARegisters::Name(int creg) {
const char* result;
if ((0 <= creg) && (creg < kNumMSARegisters)) {
result = names_[creg];
} else {
result = "nocreg";
}
return result;
}
int MSARegisters::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumMSARegisters; i++) {
if (strcmp(names_[i], name) == 0) {
return i;
}
}
// Look through the alias names.
int i = 0;
while (aliases_[i].creg != kInvalidRegister) {
if (strcmp(aliases_[i].name, name) == 0) {
return aliases_[i].creg;
}
i++;
}
  // No Cregister with the requested name found.
return kInvalidMSARegister;
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_MIPS

File diff suppressed because it is too large

@ -1,45 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// CPU specific code for mips independent of OS goes here.
#include <sys/syscall.h>
#include <unistd.h>
#ifdef __mips
#include <asm/cachectl.h>
#endif // #ifdef __mips
#if V8_TARGET_ARCH_MIPS
#include "src/codegen/cpu-features.h"
namespace v8 {
namespace internal {
void CpuFeatures::FlushICache(void* start, size_t size) {
#if !defined(USE_SIMULATOR)
// Nothing to do, flushing no instructions.
if (size == 0) {
return;
}
#if defined(ANDROID)
  // Bionic cacheflush can typically run in userland, avoiding a kernel call.
char* end = reinterpret_cast<char*>(start) + size;
cacheflush(reinterpret_cast<intptr_t>(start), reinterpret_cast<intptr_t>(end),
0);
#else // ANDROID
int res;
// See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
res = syscall(__NR_cacheflush, start, size, ICACHE);
if (res) FATAL("Failed to flush the instruction cache");
#endif // ANDROID
#endif // !USE_SIMULATOR.
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_MIPS

@ -1,314 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CODEGEN_MIPS_INTERFACE_DESCRIPTORS_MIPS_INL_H_
#define V8_CODEGEN_MIPS_INTERFACE_DESCRIPTORS_MIPS_INL_H_
#if V8_TARGET_ARCH_MIPS
#include "src/codegen/interface-descriptors.h"
#include "src/execution/frames.h"
namespace v8 {
namespace internal {
constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
auto registers = RegisterArray(a0, a1, a2, a3, t0);
static_assert(registers.size() == kMaxBuiltinRegisterParams);
return registers;
}
#if DEBUG
template <typename DerivedDescriptor>
void StaticCallInterfaceDescriptor<DerivedDescriptor>::
VerifyArgumentRegisterCount(CallInterfaceDescriptorData* data, int argc) {
RegList allocatable_regs = data->allocatable_registers();
if (argc >= 1) DCHECK(allocatable_regs.has(a0));
if (argc >= 2) DCHECK(allocatable_regs.has(a1));
if (argc >= 3) DCHECK(allocatable_regs.has(a2));
if (argc >= 4) DCHECK(allocatable_regs.has(a3));
// Additional arguments are passed on the stack.
}
#endif // DEBUG
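// The DCHECKs above encode the register/stack split used throughout this
// file: the first four arguments travel in a0-a3 and the rest go on the
// stack. For an illustrative six-argument call:
//
//   arg0 -> a0, arg1 -> a1, arg2 -> a2, arg3 -> a3
//   arg4, arg5 -> stack slots (exact offsets depend on the descriptor)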
// static
constexpr auto WriteBarrierDescriptor::registers() {
return RegisterArray(a1, t1, t0, a0, a2, v0, a3, kContextRegister);
}
// static
constexpr Register LoadDescriptor::ReceiverRegister() { return a1; }
// static
constexpr Register LoadDescriptor::NameRegister() { return a2; }
// static
constexpr Register LoadDescriptor::SlotRegister() { return a0; }
// static
constexpr Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
// static
constexpr Register KeyedLoadBaselineDescriptor::ReceiverRegister() {
return a1;
}
// static
constexpr Register KeyedLoadBaselineDescriptor::NameRegister() {
return kInterpreterAccumulatorRegister;
}
// static
constexpr Register KeyedLoadBaselineDescriptor::SlotRegister() { return a2; }
// static
constexpr Register KeyedLoadWithVectorDescriptor::VectorRegister() {
return a3;
}
// static
constexpr Register KeyedHasICBaselineDescriptor::ReceiverRegister() {
return kInterpreterAccumulatorRegister;
}
// static
constexpr Register KeyedHasICBaselineDescriptor::NameRegister() { return a1; }
// static
constexpr Register KeyedHasICBaselineDescriptor::SlotRegister() { return a2; }
// static
constexpr Register KeyedHasICWithVectorDescriptor::VectorRegister() {
return a3;
}
// static
constexpr Register
LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
return t0;
}
// static
constexpr Register StoreDescriptor::ReceiverRegister() { return a1; }
// static
constexpr Register StoreDescriptor::NameRegister() { return a2; }
// static
constexpr Register StoreDescriptor::ValueRegister() { return a0; }
// static
constexpr Register StoreDescriptor::SlotRegister() { return t0; }
// static
constexpr Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
// static
constexpr Register StoreTransitionDescriptor::MapRegister() { return t1; }
// static
constexpr Register ApiGetterDescriptor::HolderRegister() { return a0; }
// static
constexpr Register ApiGetterDescriptor::CallbackRegister() { return a3; }
// static
constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
// static
constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
// static
constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
return a2;
}
// static
constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() {
// TODO(v8:11421): Implement on this platform.
return a3;
}
// static
constexpr Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
// static
constexpr auto TypeofDescriptor::registers() { return RegisterArray(a0); }
// static
constexpr auto CallTrampolineDescriptor::registers() {
// a1: target
// a0: number of arguments
return RegisterArray(a1, a0);
}
// static
constexpr auto CopyDataPropertiesWithExcludedPropertiesDescriptor::registers() {
// a1 : the source
// a0 : the excluded property count
return RegisterArray(a1, a0);
}
// static
constexpr auto
CopyDataPropertiesWithExcludedPropertiesOnStackDescriptor::registers() {
// a1 : the source
// a0 : the excluded property count
// a2 : the excluded property base
return RegisterArray(a1, a0, a2);
}
// static
constexpr auto CallVarargsDescriptor::registers() {
// a0 : number of arguments (on the stack)
// a1 : the target to call
// t0 : arguments list length (untagged)
// a2 : arguments list (FixedArray)
return RegisterArray(a1, a0, t0, a2);
}
// static
constexpr auto CallForwardVarargsDescriptor::registers() {
// a1: the target to call
// a0: number of arguments
// a2: start index (to support rest parameters)
return RegisterArray(a1, a0, a2);
}
// static
constexpr auto CallFunctionTemplateDescriptor::registers() {
// a1 : function template info
// a0 : number of arguments (on the stack)
return RegisterArray(a1, a0);
}
// static
constexpr auto CallWithSpreadDescriptor::registers() {
// a0 : number of arguments (on the stack)
// a1 : the target to call
// a2 : the object to spread
return RegisterArray(a1, a0, a2);
}
// static
constexpr auto CallWithArrayLikeDescriptor::registers() {
// a1 : the target to call
// a2 : the arguments list
return RegisterArray(a1, a2);
}
// static
constexpr auto ConstructVarargsDescriptor::registers() {
// a0 : number of arguments (on the stack)
// a1 : the target to call
// a3 : the new target
// t0 : arguments list length (untagged)
// a2 : arguments list (FixedArray)
return RegisterArray(a1, a3, a0, t0, a2);
}
// static
constexpr auto ConstructForwardVarargsDescriptor::registers() {
// a1: the target to call
// a3: new target
// a0: number of arguments
// a2: start index (to support rest parameters)
return RegisterArray(a1, a3, a0, a2);
}
// static
constexpr auto ConstructWithSpreadDescriptor::registers() {
// a0 : number of arguments (on the stack)
// a1 : the target to call
// a3 : the new target
// a2 : the object to spread
return RegisterArray(a1, a3, a0, a2);
}
// static
constexpr auto ConstructWithArrayLikeDescriptor::registers() {
// a1 : the target to call
// a3 : the new target
// a2 : the arguments list
return RegisterArray(a1, a3, a2);
}
// static
constexpr auto ConstructStubDescriptor::registers() {
// a1: target
// a3: new target
// a0: number of arguments
return RegisterArray(a1, a3, a0);
}
// static
constexpr auto AbortDescriptor::registers() { return RegisterArray(a0); }
// static
constexpr auto CompareDescriptor::registers() { return RegisterArray(a1, a0); }
// static
constexpr auto Compare_BaselineDescriptor::registers() {
// a1: left operand
// a0: right operand
// a2: feedback slot
return RegisterArray(a1, a0, a2);
}
// static
constexpr auto BinaryOpDescriptor::registers() { return RegisterArray(a1, a0); }
// static
constexpr auto BinaryOp_BaselineDescriptor::registers() {
// TODO(v8:11421): Implement on this platform.
return RegisterArray(a1, a0, a2);
}
// static
constexpr auto BinarySmiOp_BaselineDescriptor::registers() {
// TODO(v8:11421): Implement on this platform.
return RegisterArray(a0, a1, a2);
}
// static
constexpr auto ApiCallbackDescriptor::registers() {
// a1 : kApiFunctionAddress
// a2 : kArgc
// a3 : kCallData
// a0 : kHolder
return RegisterArray(a1, a2, a3, a0);
}
// static
constexpr auto InterpreterDispatchDescriptor::registers() {
return RegisterArray(
kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister);
}
// static
constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
// a0 : argument count
// a2 : address of first argument
  // a1 : the target callable to be called
return RegisterArray(a0, a2, a1);
}
// static
constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
// a0 : argument count
// t4 : address of the first argument
// a1 : constructor to call
// a3 : new target
// a2 : allocation site feedback if available, undefined otherwise
return RegisterArray(a0, t4, a1, a3, a2);
}
// static
constexpr auto ResumeGeneratorDescriptor::registers() {
// v0 : the value to pass to the generator
// a1 : the JSGeneratorObject to resume
return RegisterArray(v0, a1);
}
// static
constexpr auto RunMicrotasksEntryDescriptor::registers() {
return RegisterArray(a0, a1);
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_MIPS
#endif // V8_CODEGEN_MIPS_INTERFACE_DESCRIPTORS_MIPS_INL_H_

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -1,299 +0,0 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CODEGEN_MIPS_REGISTER_MIPS_H_
#define V8_CODEGEN_MIPS_REGISTER_MIPS_H_
#include "src/codegen/mips/constants-mips.h"
#include "src/codegen/register-base.h"
namespace v8 {
namespace internal {
// clang-format off
#define GENERAL_REGISTERS(V) \
V(zero_reg) V(at) V(v0) V(v1) V(a0) V(a1) V(a2) V(a3) \
V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6) V(t7) \
V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) V(t8) V(t9) \
V(k0) V(k1) V(gp) V(sp) V(fp) V(ra)
#define ALLOCATABLE_GENERAL_REGISTERS(V) \
V(a0) V(a1) V(a2) V(a3) \
V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6) V(s7) \
V(v0) V(v1)
#define DOUBLE_REGISTERS(V) \
V(f0) V(f1) V(f2) V(f3) V(f4) V(f5) V(f6) V(f7) \
V(f8) V(f9) V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) \
V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \
V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31)
// Currently, MIPS uses only even floating-point registers, except
// for C function param registers.
#define DOUBLE_USE_REGISTERS(V) \
V(f0) V(f2) V(f4) V(f6) V(f8) V(f10) V(f12) V(f13) \
V(f14) V(f15) V(f16) V(f18) V(f20) V(f22) V(f24) V(f26) \
V(f28) V(f30)
#define FLOAT_REGISTERS DOUBLE_REGISTERS
#define SIMD128_REGISTERS(V) \
V(w0) V(w1) V(w2) V(w3) V(w4) V(w5) V(w6) V(w7) \
V(w8) V(w9) V(w10) V(w11) V(w12) V(w13) V(w14) V(w15) \
V(w16) V(w17) V(w18) V(w19) V(w20) V(w21) V(w22) V(w23) \
V(w24) V(w25) V(w26) V(w27) V(w28) V(w29) V(w30) V(w31)
#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
V(f0) V(f2) V(f4) V(f6) V(f8) V(f10) V(f12) V(f14) \
V(f16) V(f18) V(f20) V(f22) V(f24)
// clang-format on
// Register lists.
// Note that the bit values must match those used in actual instruction
// encoding.
const int kNumRegs = 32;
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
// compatible with int, which has caused code-generation bugs.
//
// 2) We would prefer to use a class instead of a struct but we don't like
// the register initialization to depend on the particular initialization
// order (which appears to be different on OS X, Linux, and Windows for the
// installed versions of C++ we tried). Using a struct permits C-style
// "initialization". Also, the Register objects cannot be const as this
// forces initialization stubs in MSVC, making us dependent on initialization
// order.
//
// 3) By not using an enum, we are possibly preventing the compiler from
// doing certain constant folds, which may significantly reduce the
// code generated for some assembly instructions (because they boil down
// to a few constants). If this is a problem, we could change the code
// such that we use an enum in optimized mode, and the struct in debug
// mode. This way we get the compile-time error checking in debug mode
// and best performance in optimized code.
// -----------------------------------------------------------------------------
// Implementation of Register and FPURegister.
enum RegisterCode {
#define REGISTER_CODE(R) kRegCode_##R,
GENERAL_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
kRegAfterLast
};
class Register : public RegisterBase<Register, kRegAfterLast> {
public:
#if defined(V8_TARGET_LITTLE_ENDIAN)
static constexpr int kMantissaOffset = 0;
static constexpr int kExponentOffset = 4;
#elif defined(V8_TARGET_BIG_ENDIAN)
static constexpr int kMantissaOffset = 4;
static constexpr int kExponentOffset = 0;
#else
#error Unknown endianness
#endif
private:
friend class RegisterBase;
explicit constexpr Register(int code) : RegisterBase(code) {}
};
// s7: context register
// s3: scratch register
// s4: scratch register 2
#define DECLARE_REGISTER(R) \
constexpr Register R = Register::from_code(kRegCode_##R);
GENERAL_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
constexpr Register no_reg = Register::no_reg();
int ToNumber(Register reg);
Register ToRegister(int num);
// Returns the number of padding slots needed for stack pointer alignment.
constexpr int ArgumentPaddingSlots(int argument_count) {
// No argument padding required.
return 0;
}
constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
constexpr bool kSimdMaskRegisters = false;
enum DoubleRegisterCode {
#define REGISTER_CODE(R) kDoubleCode_##R,
DOUBLE_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
kDoubleAfterLast
};
// Coprocessor register.
class FPURegister : public RegisterBase<FPURegister, kDoubleAfterLast> {
public:
FPURegister low() const {
// Find low reg of a Double-reg pair, which is the reg itself.
DCHECK_EQ(code() % 2, 0); // Specified Double reg must be even.
return FPURegister::from_code(code());
}
FPURegister high() const {
    // Find high reg of a Double-reg pair, which is reg + 1.
DCHECK_EQ(code() % 2, 0); // Specified Double reg must be even.
return FPURegister::from_code(code() + 1);
}
private:
friend class RegisterBase;
explicit constexpr FPURegister(int code) : RegisterBase(code) {}
};
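// Pairing sketch for the O32 layout described further below: a double
// "in f0" really occupies the even/odd pair {f0, f1}:
//
//   DoubleRegister d = f0;
//   d.low();   // f0, the low word of the double on little-endian
//   d.high();  // f1, the high word
//
// Odd codes fail the DCHECKs in low()/high(), which is why only even
// registers are allocatable as doubles.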
enum MSARegisterCode {
#define REGISTER_CODE(R) kMsaCode_##R,
SIMD128_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
kMsaAfterLast
};
// MIPS SIMD (MSA) register
class MSARegister : public RegisterBase<MSARegister, kMsaAfterLast> {
friend class RegisterBase;
explicit constexpr MSARegister(int code) : RegisterBase(code) {}
};
// A few double registers are reserved: one as a scratch register and one to
// hold 0.0.
// f28: 0.0
// f30: scratch register.
// V8 now supports the O32 ABI, and the FPU Registers are organized as 32
// 32-bit registers, f0 through f31. When used as 'double' they are used
// in pairs, starting with the even numbered register. So a double operation
// on f0 really uses f0 and f1.
// (Modern mips hardware also supports 32 64-bit registers, via setting
// (privileged) Status Register FR bit to 1. This is used by the N32 ABI,
// but it is not in common use. Someday we will want to support this in v8.)
// For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers.
using FloatRegister = FPURegister;
using DoubleRegister = FPURegister;
#define DECLARE_DOUBLE_REGISTER(R) \
constexpr DoubleRegister R = DoubleRegister::from_code(kDoubleCode_##R);
DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
#undef DECLARE_DOUBLE_REGISTER
constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
// SIMD registers.
using Simd128Register = MSARegister;
#define DECLARE_SIMD128_REGISTER(R) \
constexpr Simd128Register R = Simd128Register::from_code(kMsaCode_##R);
SIMD128_REGISTERS(DECLARE_SIMD128_REGISTER)
#undef DECLARE_SIMD128_REGISTER
const Simd128Register no_msareg = Simd128Register::no_reg();
// Register aliases.
// cp is assumed to be a callee saved register.
constexpr Register kRootRegister = s6;
constexpr Register cp = s7;
constexpr Register kScratchReg = s3;
constexpr Register kScratchReg2 = s4;
constexpr DoubleRegister kScratchDoubleReg = f30;
constexpr DoubleRegister kDoubleRegZero = f28;
// Used on mips32r6 for compare operations.
constexpr DoubleRegister kDoubleCompareReg = f26;
// MSA zero and scratch regs must have the same numbers as FPU zero and scratch
constexpr Simd128Register kSimd128RegZero = w28;
constexpr Simd128Register kSimd128ScratchReg = w30;
// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.
struct FPUControlRegister {
bool is_valid() const { return reg_code == kFCSRRegister; }
bool is(FPUControlRegister creg) const { return reg_code == creg.reg_code; }
int code() const {
DCHECK(is_valid());
return reg_code;
}
int bit() const {
DCHECK(is_valid());
return 1 << reg_code;
}
void setcode(int f) {
reg_code = f;
DCHECK(is_valid());
}
// Unfortunately we can't make this private in a struct.
int reg_code;
};
constexpr FPUControlRegister no_fpucreg = {kInvalidFPUControlRegister};
constexpr FPUControlRegister FCSR = {kFCSRRegister};
// MSA control registers
struct MSAControlRegister {
bool is_valid() const {
return (reg_code == kMSAIRRegister) || (reg_code == kMSACSRRegister);
}
bool is(MSAControlRegister creg) const { return reg_code == creg.reg_code; }
int code() const {
DCHECK(is_valid());
return reg_code;
}
int bit() const {
DCHECK(is_valid());
return 1 << reg_code;
}
void setcode(int f) {
reg_code = f;
DCHECK(is_valid());
}
// Unfortunately we can't make this private in a struct.
int reg_code;
};
constexpr MSAControlRegister no_msacreg = {kInvalidMSAControlRegister};
constexpr MSAControlRegister MSAIR = {kMSAIRRegister};
constexpr MSAControlRegister MSACSR = {kMSACSRRegister};
// Define {RegisterName} methods for the register types.
DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS)
DEFINE_REGISTER_NAMES(FPURegister, DOUBLE_REGISTERS)
DEFINE_REGISTER_NAMES(MSARegister, SIMD128_REGISTERS)
// Give alias names to registers for calling conventions.
constexpr Register kReturnRegister0 = v0;
constexpr Register kReturnRegister1 = v1;
constexpr Register kReturnRegister2 = a0;
constexpr Register kJSFunctionRegister = a1;
constexpr Register kContextRegister = s7;
constexpr Register kAllocateSizeRegister = a0;
constexpr Register kInterpreterAccumulatorRegister = v0;
constexpr Register kInterpreterBytecodeOffsetRegister = t4;
constexpr Register kInterpreterBytecodeArrayRegister = t5;
constexpr Register kInterpreterDispatchTableRegister = t6;
constexpr Register kJavaScriptCallArgCountRegister = a0;
constexpr Register kJavaScriptCallCodeStartRegister = a2;
constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
constexpr Register kJavaScriptCallNewTargetRegister = a3;
constexpr Register kJavaScriptCallExtraArg1Register = a2;
constexpr Register kOffHeapTrampolineRegister = at;
constexpr Register kRuntimeCallFunctionRegister = a1;
constexpr Register kRuntimeCallArgCountRegister = a0;
constexpr Register kRuntimeCallArgvRegister = a2;
constexpr Register kWasmInstanceRegister = a0;
constexpr Register kWasmCompileLazyFuncIndexRegister = t0;
constexpr DoubleRegister kFPReturnRegister0 = f0;
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_MIPS_REGISTER_MIPS_H_

@ -1,48 +0,0 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CODEGEN_MIPS_REGLIST_MIPS_H_
#define V8_CODEGEN_MIPS_REGLIST_MIPS_H_
#include "src/codegen/mips/constants-mips.h"
#include "src/codegen/register-arch.h"
#include "src/codegen/reglist-base.h"
namespace v8 {
namespace internal {
using RegList = RegListBase<Register>;
using DoubleRegList = RegListBase<DoubleRegister>;
ASSERT_TRIVIALLY_COPYABLE(RegList);
ASSERT_TRIVIALLY_COPYABLE(DoubleRegList);
const RegList kJSCallerSaved = {v0, v1, a0, a1, a2, a3, t0,
t1, t2, t3, t4, t5, t6, t7};
const int kNumJSCallerSaved = 14;
// Callee-saved registers preserved when switching from C to JavaScript.
const RegList kCalleeSaved = {s0, // s0
s1, // s1
s2, // s2
s3, // s3
s4, // s4
s5, // s5
s6, // s6 (roots in Javascript code)
s7, // s7 (cp in Javascript code)
fp}; // fp/s8
const int kNumCalleeSaved = 9;
const DoubleRegList kCalleeSavedFPU = {f20, f22, f24, f26, f28, f30};
const int kNumCalleeSavedFPU = 6;
const DoubleRegList kCallerSavedFPU = {f0, f2, f4, f6, f8,
f10, f12, f14, f16, f18};
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_MIPS_REGLIST_MIPS_H_

@ -17,8 +17,6 @@
#include "src/codegen/arm/register-arm.h"
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/codegen/ppc/register-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/codegen/mips/register-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/register-mips64.h"
#elif V8_TARGET_ARCH_LOONG64

@ -15,8 +15,6 @@
#include "src/codegen/arm/reglist-arm.h"
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/codegen/ppc/reglist-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/codegen/mips/reglist-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/reglist-mips64.h"
#elif V8_TARGET_ARCH_LOONG64

@ -309,11 +309,10 @@ bool RelocInfo::OffHeapTargetIsCodedSpecially() {
#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_ARM64) || \
defined(V8_TARGET_ARCH_X64)
return false;
#elif defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_MIPS) || \
defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC) || \
defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_S390) || \
defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_LOONG64) || \
defined(V8_TARGET_ARCH_RISCV32)
#elif defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_MIPS64) || \
defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64) || \
defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_RISCV64) || \
defined(V8_TARGET_ARCH_LOONG64) || defined(V8_TARGET_ARCH_RISCV32)
return true;
#endif
}

@ -71,7 +71,7 @@ class RelocInfo {
EXTERNAL_REFERENCE, // The address of an external C++ function.
INTERNAL_REFERENCE, // An address inside the same function.
// Encoded internal reference, used only on RISCV64, RISCV32, MIPS, MIPS64
// Encoded internal reference, used only on RISCV64, RISCV32, MIPS64
// and PPC.
INTERNAL_REFERENCE_ENCODED,

@ -46,9 +46,6 @@ namespace internal {
#if (V8_TARGET_ARCH_PPC64 && !V8_HOST_ARCH_PPC64)
#define USE_SIMULATOR 1
#endif
#if (V8_TARGET_ARCH_MIPS && !V8_HOST_ARCH_MIPS)
#define USE_SIMULATOR 1
#endif
#if (V8_TARGET_ARCH_MIPS64 && !V8_HOST_ARCH_MIPS64)
#define USE_SIMULATOR 1
#endif
@ -428,7 +425,7 @@ constexpr bool kPlatformRequiresCodeRange = false;
constexpr size_t kMaximalCodeRangeSize = 0 * MB;
constexpr size_t kMinimumCodeRangeSize = 0 * MB;
constexpr size_t kMinExpectedOSPageSize = 64 * KB; // OS page on PPC Linux
#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_RISCV32
#elif V8_TARGET_ARCH_RISCV32
constexpr bool kPlatformRequiresCodeRange = false;
constexpr size_t kMaximalCodeRangeSize = 2048LL * MB;
constexpr size_t kMinimumCodeRangeSize = 0 * MB;
@ -1359,9 +1356,7 @@ enum AllocationSiteMode {
enum class AllocationSiteUpdateMode { kUpdate, kCheckOnly };
// The mips architecture prior to revision 5 has inverted encoding for sNaN.
#if (V8_TARGET_ARCH_MIPS && !defined(_MIPS_ARCH_MIPS32R6) && \
(!defined(USE_SIMULATOR) || !defined(_MIPS_TARGET_SIMULATOR))) || \
(V8_TARGET_ARCH_MIPS64 && !defined(_MIPS_ARCH_MIPS64R6) && \
#if (V8_TARGET_ARCH_MIPS64 && !defined(_MIPS_ARCH_MIPS64R6) && \
(!defined(USE_SIMULATOR) || !defined(_MIPS_TARGET_SIMULATOR)))
constexpr uint32_t kHoleNanUpper32 = 0xFFFF7FFF;
constexpr uint32_t kHoleNanLower32 = 0xFFFF7FFF;

@ -13,8 +13,6 @@
#include "src/compiler/backend/arm64/instruction-codes-arm64.h"
#elif V8_TARGET_ARCH_IA32
#include "src/compiler/backend/ia32/instruction-codes-ia32.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/compiler/backend/mips/instruction-codes-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/compiler/backend/mips64/instruction-codes-mips64.h"
#elif V8_TARGET_ARCH_LOONG64

@ -2698,8 +2698,7 @@ void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
#endif // V8_TARGET_ARCH_64_BIT
#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_RISCV32
#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_RISCV32
void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
UNIMPLEMENTED();
}
@ -2735,7 +2734,7 @@ void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) {
void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
UNIMPLEMENTED();
}
#endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
#endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
// && !V8_TARGET_ARCH_RISCV32
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && \

File diff suppressed because it is too large

@ -1,400 +0,0 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_BACKEND_MIPS_INSTRUCTION_CODES_MIPS_H_
#define V8_COMPILER_BACKEND_MIPS_INSTRUCTION_CODES_MIPS_H_
namespace v8 {
namespace internal {
namespace compiler {
// MIPS-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
V(MipsAdd) \
V(MipsAddOvf) \
V(MipsSub) \
V(MipsSubOvf) \
V(MipsMul) \
V(MipsMulOvf) \
V(MipsMulHigh) \
V(MipsMulHighU) \
V(MipsDiv) \
V(MipsDivU) \
V(MipsMod) \
V(MipsModU) \
V(MipsAnd) \
V(MipsOr) \
V(MipsNor) \
V(MipsXor) \
V(MipsClz) \
V(MipsCtz) \
V(MipsPopcnt) \
V(MipsLsa) \
V(MipsShl) \
V(MipsShr) \
V(MipsSar) \
V(MipsShlPair) \
V(MipsShrPair) \
V(MipsSarPair) \
V(MipsExt) \
V(MipsIns) \
V(MipsRor) \
V(MipsMov) \
V(MipsTst) \
V(MipsCmp) \
V(MipsCmpS) \
V(MipsAddS) \
V(MipsSubS) \
V(MipsMulS) \
V(MipsDivS) \
V(MipsAbsS) \
V(MipsSqrtS) \
V(MipsMaxS) \
V(MipsMinS) \
V(MipsCmpD) \
V(MipsAddD) \
V(MipsSubD) \
V(MipsMulD) \
V(MipsDivD) \
V(MipsModD) \
V(MipsAbsD) \
V(MipsSqrtD) \
V(MipsMaxD) \
V(MipsMinD) \
V(MipsNegS) \
V(MipsNegD) \
V(MipsAddPair) \
V(MipsSubPair) \
V(MipsMulPair) \
V(MipsMaddS) \
V(MipsMaddD) \
V(MipsMsubS) \
V(MipsMsubD) \
V(MipsFloat32RoundDown) \
V(MipsFloat32RoundTruncate) \
V(MipsFloat32RoundUp) \
V(MipsFloat32RoundTiesEven) \
V(MipsFloat64RoundDown) \
V(MipsFloat64RoundTruncate) \
V(MipsFloat64RoundUp) \
V(MipsFloat64RoundTiesEven) \
V(MipsCvtSD) \
V(MipsCvtDS) \
V(MipsTruncWD) \
V(MipsRoundWD) \
V(MipsFloorWD) \
V(MipsCeilWD) \
V(MipsTruncWS) \
V(MipsRoundWS) \
V(MipsFloorWS) \
V(MipsCeilWS) \
V(MipsTruncUwD) \
V(MipsTruncUwS) \
V(MipsCvtDW) \
V(MipsCvtDUw) \
V(MipsCvtSW) \
V(MipsCvtSUw) \
V(MipsLb) \
V(MipsLbu) \
V(MipsSb) \
V(MipsLh) \
V(MipsUlh) \
V(MipsLhu) \
V(MipsUlhu) \
V(MipsSh) \
V(MipsUsh) \
V(MipsLw) \
V(MipsUlw) \
V(MipsSw) \
V(MipsUsw) \
V(MipsLwc1) \
V(MipsUlwc1) \
V(MipsSwc1) \
V(MipsUswc1) \
V(MipsLdc1) \
V(MipsUldc1) \
V(MipsSdc1) \
V(MipsUsdc1) \
V(MipsFloat64ExtractLowWord32) \
V(MipsFloat64ExtractHighWord32) \
V(MipsFloat64InsertLowWord32) \
V(MipsFloat64InsertHighWord32) \
V(MipsFloat64SilenceNaN) \
V(MipsFloat32Max) \
V(MipsFloat64Max) \
V(MipsFloat32Min) \
V(MipsFloat64Min) \
V(MipsPush) \
V(MipsPeek) \
V(MipsStoreToStackSlot) \
V(MipsByteSwap32) \
V(MipsStackClaim) \
V(MipsSeb) \
V(MipsSeh) \
V(MipsSync) \
V(MipsS128Zero) \
V(MipsI32x4Splat) \
V(MipsI32x4ExtractLane) \
V(MipsI32x4ReplaceLane) \
V(MipsI32x4Add) \
V(MipsI32x4Sub) \
V(MipsF64x2Abs) \
V(MipsF64x2Neg) \
V(MipsF64x2Sqrt) \
V(MipsF64x2Add) \
V(MipsF64x2Sub) \
V(MipsF64x2Mul) \
V(MipsF64x2Div) \
V(MipsF64x2Min) \
V(MipsF64x2Max) \
V(MipsF64x2Eq) \
V(MipsF64x2Ne) \
V(MipsF64x2Lt) \
V(MipsF64x2Le) \
V(MipsF64x2Pmin) \
V(MipsF64x2Pmax) \
V(MipsF64x2Ceil) \
V(MipsF64x2Floor) \
V(MipsF64x2Trunc) \
V(MipsF64x2NearestInt) \
V(MipsF64x2ConvertLowI32x4S) \
V(MipsF64x2ConvertLowI32x4U) \
V(MipsF64x2PromoteLowF32x4) \
V(MipsI64x2Add) \
V(MipsI64x2Sub) \
V(MipsI64x2Mul) \
V(MipsI64x2Neg) \
V(MipsI64x2Shl) \
V(MipsI64x2ShrS) \
V(MipsI64x2ShrU) \
V(MipsI64x2BitMask) \
V(MipsI64x2Eq) \
V(MipsI64x2Ne) \
V(MipsI64x2GtS) \
V(MipsI64x2GeS) \
V(MipsI64x2Abs) \
V(MipsI64x2SConvertI32x4Low) \
V(MipsI64x2SConvertI32x4High) \
V(MipsI64x2UConvertI32x4Low) \
V(MipsI64x2UConvertI32x4High) \
V(MipsI64x2ExtMulLowI32x4S) \
V(MipsI64x2ExtMulHighI32x4S) \
V(MipsI64x2ExtMulLowI32x4U) \
V(MipsI64x2ExtMulHighI32x4U) \
V(MipsF32x4Splat) \
V(MipsF32x4ExtractLane) \
V(MipsF32x4ReplaceLane) \
V(MipsF32x4SConvertI32x4) \
V(MipsF32x4UConvertI32x4) \
V(MipsF32x4DemoteF64x2Zero) \
V(MipsI32x4Mul) \
V(MipsI32x4MaxS) \
V(MipsI32x4MinS) \
V(MipsI32x4Eq) \
V(MipsI32x4Ne) \
V(MipsI32x4Shl) \
V(MipsI32x4ShrS) \
V(MipsI32x4ShrU) \
V(MipsI32x4MaxU) \
V(MipsI32x4MinU) \
V(MipsF64x2Splat) \
V(MipsF64x2ExtractLane) \
V(MipsF64x2ReplaceLane) \
V(MipsF32x4Abs) \
V(MipsF32x4Neg) \
V(MipsF32x4Sqrt) \
V(MipsF32x4Add) \
V(MipsF32x4Sub) \
V(MipsF32x4Mul) \
V(MipsF32x4Div) \
V(MipsF32x4Max) \
V(MipsF32x4Min) \
V(MipsF32x4Eq) \
V(MipsF32x4Ne) \
V(MipsF32x4Lt) \
V(MipsF32x4Le) \
V(MipsF32x4Pmin) \
V(MipsF32x4Pmax) \
V(MipsF32x4Ceil) \
V(MipsF32x4Floor) \
V(MipsF32x4Trunc) \
V(MipsF32x4NearestInt) \
V(MipsI32x4SConvertF32x4) \
V(MipsI32x4UConvertF32x4) \
V(MipsI32x4Neg) \
V(MipsI32x4GtS) \
V(MipsI32x4GeS) \
V(MipsI32x4GtU) \
V(MipsI32x4GeU) \
V(MipsI32x4Abs) \
V(MipsI32x4BitMask) \
V(MipsI32x4DotI16x8S) \
V(MipsI32x4ExtMulLowI16x8S) \
V(MipsI32x4ExtMulHighI16x8S) \
V(MipsI32x4ExtMulLowI16x8U) \
V(MipsI32x4ExtMulHighI16x8U) \
V(MipsI32x4TruncSatF64x2SZero) \
V(MipsI32x4TruncSatF64x2UZero) \
V(MipsI32x4ExtAddPairwiseI16x8S) \
V(MipsI32x4ExtAddPairwiseI16x8U) \
V(MipsI16x8Splat) \
V(MipsI16x8ExtractLaneU) \
V(MipsI16x8ExtractLaneS) \
V(MipsI16x8ReplaceLane) \
V(MipsI16x8Neg) \
V(MipsI16x8Shl) \
V(MipsI16x8ShrS) \
V(MipsI16x8ShrU) \
V(MipsI16x8Add) \
V(MipsI16x8AddSatS) \
V(MipsI16x8Sub) \
V(MipsI16x8SubSatS) \
V(MipsI16x8Mul) \
V(MipsI16x8MaxS) \
V(MipsI16x8MinS) \
V(MipsI16x8Eq) \
V(MipsI16x8Ne) \
V(MipsI16x8GtS) \
V(MipsI16x8GeS) \
V(MipsI16x8AddSatU) \
V(MipsI16x8SubSatU) \
V(MipsI16x8MaxU) \
V(MipsI16x8MinU) \
V(MipsI16x8GtU) \
V(MipsI16x8GeU) \
V(MipsI16x8RoundingAverageU) \
V(MipsI16x8Abs) \
V(MipsI16x8BitMask) \
V(MipsI16x8Q15MulRSatS) \
V(MipsI16x8ExtMulLowI8x16S) \
V(MipsI16x8ExtMulHighI8x16S) \
V(MipsI16x8ExtMulLowI8x16U) \
V(MipsI16x8ExtMulHighI8x16U) \
V(MipsI16x8ExtAddPairwiseI8x16S) \
V(MipsI16x8ExtAddPairwiseI8x16U) \
V(MipsI8x16Splat) \
V(MipsI8x16ExtractLaneU) \
V(MipsI8x16ExtractLaneS) \
V(MipsI8x16ReplaceLane) \
V(MipsI8x16Neg) \
V(MipsI8x16Shl) \
V(MipsI8x16ShrS) \
V(MipsI8x16Add) \
V(MipsI8x16AddSatS) \
V(MipsI8x16Sub) \
V(MipsI8x16SubSatS) \
V(MipsI8x16MaxS) \
V(MipsI8x16MinS) \
V(MipsI8x16Eq) \
V(MipsI8x16Ne) \
V(MipsI8x16GtS) \
V(MipsI8x16GeS) \
V(MipsI8x16ShrU) \
V(MipsI8x16AddSatU) \
V(MipsI8x16SubSatU) \
V(MipsI8x16MaxU) \
V(MipsI8x16MinU) \
V(MipsI8x16GtU) \
V(MipsI8x16GeU) \
V(MipsI8x16RoundingAverageU) \
V(MipsI8x16Abs) \
V(MipsI8x16Popcnt) \
V(MipsI8x16BitMask) \
V(MipsS128And) \
V(MipsS128Or) \
V(MipsS128Xor) \
V(MipsS128Not) \
V(MipsS128Select) \
V(MipsS128AndNot) \
V(MipsI64x2AllTrue) \
V(MipsI32x4AllTrue) \
V(MipsI16x8AllTrue) \
V(MipsI8x16AllTrue) \
V(MipsV128AnyTrue) \
V(MipsS32x4InterleaveRight) \
V(MipsS32x4InterleaveLeft) \
V(MipsS32x4PackEven) \
V(MipsS32x4PackOdd) \
V(MipsS32x4InterleaveEven) \
V(MipsS32x4InterleaveOdd) \
V(MipsS32x4Shuffle) \
V(MipsS16x8InterleaveRight) \
V(MipsS16x8InterleaveLeft) \
V(MipsS16x8PackEven) \
V(MipsS16x8PackOdd) \
V(MipsS16x8InterleaveEven) \
V(MipsS16x8InterleaveOdd) \
V(MipsS16x4Reverse) \
V(MipsS16x2Reverse) \
V(MipsS8x16InterleaveRight) \
V(MipsS8x16InterleaveLeft) \
V(MipsS8x16PackEven) \
V(MipsS8x16PackOdd) \
V(MipsS8x16InterleaveEven) \
V(MipsS8x16InterleaveOdd) \
V(MipsI8x16Shuffle) \
V(MipsI8x16Swizzle) \
V(MipsS8x16Concat) \
V(MipsS8x8Reverse) \
V(MipsS8x4Reverse) \
V(MipsS8x2Reverse) \
V(MipsS128Load8Splat) \
V(MipsS128Load16Splat) \
V(MipsS128Load32Splat) \
V(MipsS128Load64Splat) \
V(MipsS128Load8x8S) \
V(MipsS128Load8x8U) \
V(MipsS128Load16x4S) \
V(MipsS128Load16x4U) \
V(MipsS128Load32x2S) \
V(MipsS128Load32x2U) \
V(MipsMsaLd) \
V(MipsMsaSt) \
V(MipsI32x4SConvertI16x8Low) \
V(MipsI32x4SConvertI16x8High) \
V(MipsI32x4UConvertI16x8Low) \
V(MipsI32x4UConvertI16x8High) \
V(MipsI16x8SConvertI8x16Low) \
V(MipsI16x8SConvertI8x16High) \
V(MipsI16x8SConvertI32x4) \
V(MipsI16x8UConvertI32x4) \
V(MipsI16x8UConvertI8x16Low) \
V(MipsI16x8UConvertI8x16High) \
V(MipsI8x16SConvertI16x8) \
V(MipsI8x16UConvertI16x8) \
V(MipsWord32AtomicPairLoad) \
V(MipsWord32AtomicPairStore) \
V(MipsWord32AtomicPairAdd) \
V(MipsWord32AtomicPairSub) \
V(MipsWord32AtomicPairAnd) \
V(MipsWord32AtomicPairOr) \
V(MipsWord32AtomicPairXor) \
V(MipsWord32AtomicPairExchange) \
V(MipsWord32AtomicPairCompareExchange)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
//
// We use the following local notation for addressing modes:
//
// R = register
// O = register or stack slot
// D = double register
// I = immediate (handle, external, int32)
// MRI = [register + immediate]
// MRR = [register + register]
// TODO(plind): Add the new r6 address modes.
#define TARGET_ADDRESSING_MODE_LIST(V) \
V(MRI) /* [%r0 + K] */ \
V(MRR) /* [%r0 + %r1] */
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_COMPILER_BACKEND_MIPS_INSTRUCTION_CODES_MIPS_H_

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -65,15 +65,6 @@ namespace {
#define CALLEE_SAVE_FP_REGISTERS d8, d9, d10, d11, d12, d13, d14, d15
#elif V8_TARGET_ARCH_MIPS
// ===========================================================================
// == mips ===================================================================
// ===========================================================================
#define STACK_SHADOW_WORDS 4
#define PARAM_REGISTERS a0, a1, a2, a3
#define CALLEE_SAVE_REGISTERS s0, s1, s2, s3, s4, s5, s6, s7
#define CALLEE_SAVE_FP_REGISTERS f20, f22, f24, f26, f28, f30
#elif V8_TARGET_ARCH_MIPS64
// ===========================================================================
// == mips64 =================================================================

@ -1,34 +0,0 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/deoptimizer/deoptimizer.h"
namespace v8 {
namespace internal {
const int Deoptimizer::kEagerDeoptExitSize = 3 * kInstrSize;
const int Deoptimizer::kLazyDeoptExitSize = 3 * kInstrSize;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
return Float32::FromBits(
static_cast<uint32_t>(double_registers_[n].get_bits()));
}
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
// No embedded constant pool support.
UNREACHABLE();
}
void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
} // namespace internal
} // namespace v8

File diff suppressed because it is too large

@ -1,14 +0,0 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/diagnostics/unwinder.h"
namespace v8 {
struct RegisterState;
void GetCalleeSavedRegistersFromEntryFrame(void* fp,
RegisterState* register_state) {}
} // namespace v8

@ -85,7 +85,6 @@ class LinuxPerfJitLogger : public CodeEventLogger {
static const uint32_t kElfMachIA32 = 3;
static const uint32_t kElfMachX64 = 62;
static const uint32_t kElfMachARM = 40;
static const uint32_t kElfMachMIPS = 8;
static const uint32_t kElfMachMIPS64 = 8;
static const uint32_t kElfMachLOONG64 = 258;
static const uint32_t kElfMachARM64 = 183;
@ -100,8 +99,6 @@ class LinuxPerfJitLogger : public CodeEventLogger {
return kElfMachX64;
#elif V8_TARGET_ARCH_ARM
return kElfMachARM;
#elif V8_TARGET_ARCH_MIPS
return kElfMachMIPS;
#elif V8_TARGET_ARCH_MIPS64
return kElfMachMIPS64;
#elif V8_TARGET_ARCH_LOONG64

@ -18,8 +18,6 @@
#include "src/codegen/x64/register-x64.h"
#elif V8_HOST_ARCH_LOONG64 && V8_TARGET_ARCH_LOONG64
#include "src/codegen/loong64/register-loong64.h"
#elif V8_HOST_ARCH_MIPS && V8_TARGET_ARCH_MIPS
#include "src/codegen/mips/register-mips.h"
#elif V8_HOST_ARCH_MIPS64 && V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/register-mips64.h"
#endif
@ -52,9 +50,6 @@ namespace internal {
#elif V8_HOST_ARCH_LOONG64 && V8_TARGET_ARCH_LOONG64
#define CLOBBER_REGISTER(R) __asm__ volatile("movgr2fr.d $" #R ",$zero" :::);
#elif V8_HOST_ARCH_MIPS && V8_TARGET_ARCH_MIPS
#define CLOBBER_USE_REGISTER(R) __asm__ volatile("mtc1 $zero,$" #R :::);
#elif V8_HOST_ARCH_MIPS64 && V8_TARGET_ARCH_MIPS64
#define CLOBBER_USE_REGISTER(R) __asm__ volatile("dmtc1 $zero,$" #R :::);

@ -416,8 +416,6 @@ inline static int FrameSlotToFPOffset(int slot) {
#include "src/execution/arm/frame-constants-arm.h"
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/execution/ppc/frame-constants-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/execution/mips/frame-constants-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/execution/mips64/frame-constants-mips64.h"
#elif V8_TARGET_ARCH_LOONG64

@ -1,32 +0,0 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_MIPS
#include "src/execution/mips/frame-constants-mips.h"
#include "src/codegen/mips/assembler-mips-inl.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
namespace v8 {
namespace internal {
Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); }
int UnoptimizedFrameConstants::RegisterStackSlotCount(int register_count) {
return register_count;
}
int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) {
USE(register_count);
return 0;
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_MIPS

@ -1,84 +0,0 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_EXECUTION_MIPS_FRAME_CONSTANTS_MIPS_H_
#define V8_EXECUTION_MIPS_FRAME_CONSTANTS_MIPS_H_
#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/codegen/register.h"
#include "src/execution/frame-constants.h"
namespace v8 {
namespace internal {
class EntryFrameConstants : public AllStatic {
public:
// This is the offset to where JSEntry pushes the current value of
// Isolate::c_entry_fp onto the stack.
static constexpr int kCallerFPOffset = -3 * kSystemPointerSize;
// Stack offsets for arguments passed to JSEntry.
static constexpr int kArgcOffset = +0 * kSystemPointerSize;
static constexpr int kArgvOffset = +1 * kSystemPointerSize;
};
class WasmCompileLazyFrameConstants : public TypedFrameConstants {
public:
static constexpr int kNumberOfSavedGpParamRegs = 3;
static constexpr int kNumberOfSavedFpParamRegs = 7;
static constexpr int kNumberOfSavedAllParamRegs = 10;
// FP-relative.
// See Generate_WasmCompileLazy in builtins-mips.cc.
static constexpr int kWasmInstanceOffset =
TYPED_FRAME_PUSHED_VALUE_OFFSET(kNumberOfSavedAllParamRegs);
static constexpr int kFixedFrameSizeFromFp =
TypedFrameConstants::kFixedFrameSizeFromFp +
kNumberOfSavedGpParamRegs * kPointerSize +
kNumberOfSavedFpParamRegs * kDoubleSize;
};
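// Size check for the lazy-compile frame above: 3 saved GP params * 4 bytes
// plus 7 saved FP params * 8 bytes adds 68 bytes on top of the typed
// frame's fixed size, and 3 + 7 matches the 10 registers counted by
// kNumberOfSavedAllParamRegs.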
// Frame constructed by the {WasmDebugBreak} builtin.
// After pushing the frame type marker, the builtin pushes all Liftoff cache
// registers (see liftoff-assembler-defs.h).
class WasmDebugBreakFrameConstants : public TypedFrameConstants {
public:
// {v0, v1, a0, a1, a2, a3, t0, t1, t2, t3, t4, t5, t6, s7}
static constexpr RegList kPushedGpRegs = {v0, v1, a0, a1, a2, a3, t0,
t1, t2, t3, t4, t5, t6, s7};
// {f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24}
static constexpr DoubleRegList kPushedFpRegs = {
f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24};
static constexpr int kNumPushedGpRegisters = kPushedGpRegs.Count();
static constexpr int kNumPushedFpRegisters = kPushedFpRegs.Count();
static constexpr int kLastPushedGpRegisterOffset =
-kFixedFrameSizeFromFp - kNumPushedGpRegisters * kSystemPointerSize;
static constexpr int kLastPushedFpRegisterOffset =
kLastPushedGpRegisterOffset - kNumPushedFpRegisters * kDoubleSize;
// Offsets are fp-relative.
static int GetPushedGpRegisterOffset(int reg_code) {
DCHECK_NE(0, kPushedGpRegs.bits() & (1 << reg_code));
uint32_t lower_regs =
kPushedGpRegs.bits() & ((uint32_t{1} << reg_code) - 1);
return kLastPushedGpRegisterOffset +
base::bits::CountPopulation(lower_regs) * kSystemPointerSize;
}
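  // Worked example: for a2 (code 6) the pushed registers with smaller codes
  // are {v0, v1, a0, a1} (codes 2-5), so CountPopulation(lower_regs) is 4
  // and the slot sits at
  // kLastPushedGpRegisterOffset + 4 * kSystemPointerSize. Registers are
  // thus laid out in ascending code order above the last-pushed slot.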
static int GetPushedFpRegisterOffset(int reg_code) {
DCHECK_NE(0, kPushedFpRegs.bits() & (1 << reg_code));
uint32_t lower_regs =
kPushedFpRegs.bits() & ((uint32_t{1} << reg_code) - 1);
return kLastPushedFpRegisterOffset +
base::bits::CountPopulation(lower_regs) * kDoubleSize;
}
};
} // namespace internal
} // namespace v8
#endif // V8_EXECUTION_MIPS_FRAME_CONSTANTS_MIPS_H_

File diff suppressed because it is too large

@ -1,719 +0,0 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Declares a Simulator for MIPS instructions if we are not generating a native
// MIPS binary. This Simulator allows us to run and debug MIPS code generation
// on regular desktop machines.
// V8 calls into generated code via the GeneratedCode wrapper,
// which will start execution in the Simulator or forward to the real entry
// on a MIPS HW platform.
#ifndef V8_EXECUTION_MIPS_SIMULATOR_MIPS_H_
#define V8_EXECUTION_MIPS_SIMULATOR_MIPS_H_
// globals.h defines USE_SIMULATOR.
#include "src/common/globals.h"
template <typename T>
int Compare(const T& a, const T& b) {
if (a == b)
return 0;
else if (a < b)
return -1;
else
return 1;
}
// Returns the negative absolute value of its argument.
template <typename T,
typename = typename std::enable_if<std::is_signed<T>::value>::type>
T Nabs(T a) {
return a < 0 ? a : -a;
}
#if defined(USE_SIMULATOR)
// Running with a simulator.
#include "src/base/hashmap.h"
#include "src/base/strings.h"
#include "src/codegen/assembler.h"
#include "src/codegen/mips/constants-mips.h"
#include "src/execution/simulator-base.h"
#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
// Utility functions
class CachePage {
public:
static const int LINE_VALID = 0;
static const int LINE_INVALID = 1;
static const int kPageShift = 12;
static const int kPageSize = 1 << kPageShift;
static const int kPageMask = kPageSize - 1;
static const int kLineShift = 2; // The cache line is only 4 bytes right now.
static const int kLineLength = 1 << kLineShift;
static const int kLineMask = kLineLength - 1;
CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }
char* ValidityByte(int offset) {
return &validity_map_[offset >> kLineShift];
}
char* CachedData(int offset) { return &data_[offset]; }
private:
char data_[kPageSize]; // The cached data.
static const int kValidityMapSize = kPageSize >> kLineShift;
char validity_map_[kValidityMapSize]; // One byte per line.
};
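// The shift arithmetic above yields 4 KB pages (1 << 12) tracked at 4-byte
// line granularity (1 << 2), so the validity map needs one byte per line:
//
//   kValidityMapSize == kPageSize >> kLineShift == 4096 >> 2 == 1024
//
// ValidityByte(offset) indexes that map with offset >> kLineShift.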
class SimInstructionBase : public InstructionBase {
public:
Type InstructionType() const { return type_; }
inline Instruction* instr() const { return instr_; }
inline int32_t operand() const { return operand_; }
protected:
SimInstructionBase() : operand_(-1), instr_(nullptr), type_(kUnsupported) {}
explicit SimInstructionBase(Instruction* instr) {}
int32_t operand_;
Instruction* instr_;
Type type_;
private:
DISALLOW_ASSIGN(SimInstructionBase);
};
class SimInstruction : public InstructionGetters<SimInstructionBase> {
public:
SimInstruction() {}
explicit SimInstruction(Instruction* instr) { *this = instr; }
SimInstruction& operator=(Instruction* instr) {
operand_ = *reinterpret_cast<const int32_t*>(instr);
instr_ = instr;
type_ = InstructionBase::InstructionType();
DCHECK(reinterpret_cast<void*>(&operand_) == this);
return *this;
}
};
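// The DCHECK in operator= depends on operand_ being the first data member
// (inherited from SimInstructionBase), so &operand_ aliases "this": the
// object then overlays the copied 32-bit instruction word, and the
// InstructionGetters mixin can decode bit fields straight out of it.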
class Simulator : public SimulatorBase {
public:
friend class MipsDebugger;
// Registers are declared in order. See SMRL chapter 2.
enum Register {
no_reg = -1,
zero_reg = 0,
at,
v0,
v1,
a0,
a1,
a2,
a3,
t0,
t1,
t2,
t3,
t4,
t5,
t6,
t7,
s0,
s1,
s2,
s3,
s4,
s5,
s6,
s7,
t8,
t9,
k0,
k1,
gp,
sp,
s8,
ra,
// LO, HI, and pc.
LO,
HI,
pc, // pc must be the last register.
kNumSimuRegisters,
// aliases
fp = s8
};
// Coprocessor registers.
// Generated code will always use doubles. So we will only use even registers.
enum FPURegister {
f0,
f1,
f2,
f3,
f4,
f5,
f6,
f7,
f8,
f9,
f10,
f11,
f12,
f13,
f14,
f15, // f12 and f14 are arguments FPURegisters.
f16,
f17,
f18,
f19,
f20,
f21,
f22,
f23,
f24,
f25,
f26,
f27,
f28,
f29,
f30,
f31,
kNumFPURegisters
};
// MSA registers
enum MSARegister {
w0,
w1,
w2,
w3,
w4,
w5,
w6,
w7,
w8,
w9,
w10,
w11,
w12,
w13,
w14,
w15,
w16,
w17,
w18,
w19,
w20,
w21,
w22,
w23,
w24,
w25,
w26,
w27,
w28,
w29,
w30,
w31,
kNumMSARegisters
};
explicit Simulator(Isolate* isolate);
~Simulator();
// The currently executing Simulator instance. Potentially there can be one
// for each native thread.
V8_EXPORT_PRIVATE static Simulator* current(v8::internal::Isolate* isolate);
// Accessors for register state. Reading the pc value adheres to the MIPS
  // architecture specification and is off by 8 from the currently executing
// instruction.
void set_register(int reg, int32_t value);
void set_dw_register(int dreg, const int* dbl);
int32_t get_register(int reg) const;
double get_double_from_register_pair(int reg);
// Same for FPURegisters.
void set_fpu_register(int fpureg, int64_t value);
void set_fpu_register_word(int fpureg, int32_t value);
void set_fpu_register_hi_word(int fpureg, int32_t value);
void set_fpu_register_float(int fpureg, float value);
void set_fpu_register_double(int fpureg, double value);
void set_fpu_register_invalid_result64(float original, float rounded);
void set_fpu_register_invalid_result(float original, float rounded);
void set_fpu_register_word_invalid_result(float original, float rounded);
void set_fpu_register_invalid_result64(double original, double rounded);
void set_fpu_register_invalid_result(double original, double rounded);
void set_fpu_register_word_invalid_result(double original, double rounded);
int64_t get_fpu_register(int fpureg) const;
int32_t get_fpu_register_word(int fpureg) const;
int32_t get_fpu_register_signed_word(int fpureg) const;
int32_t get_fpu_register_hi_word(int fpureg) const;
float get_fpu_register_float(int fpureg) const;
double get_fpu_register_double(int fpureg) const;
template <typename T>
void get_msa_register(int wreg, T* value);
template <typename T>
void set_msa_register(int wreg, const T* value);
void set_fcsr_bit(uint32_t cc, bool value);
bool test_fcsr_bit(uint32_t cc);
void clear_fcsr_cause();
void set_fcsr_rounding_mode(FPURoundingMode mode);
void set_msacsr_rounding_mode(FPURoundingMode mode);
unsigned int get_fcsr_rounding_mode();
unsigned int get_msacsr_rounding_mode();
bool set_fcsr_round_error(double original, double rounded);
bool set_fcsr_round_error(float original, float rounded);
bool set_fcsr_round64_error(double original, double rounded);
bool set_fcsr_round64_error(float original, float rounded);
void round_according_to_fcsr(double toRound, double* rounded,
int32_t* rounded_int, double fs);
void round_according_to_fcsr(float toRound, float* rounded,
int32_t* rounded_int, float fs);
template <typename Tfp, typename Tint>
void round_according_to_msacsr(Tfp toRound, Tfp* rounded, Tint* rounded_int);
void round64_according_to_fcsr(double toRound, double* rounded,
int64_t* rounded_int, double fs);
void round64_according_to_fcsr(float toRound, float* rounded,
int64_t* rounded_int, float fs);
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int32_t value);
int32_t get_pc() const;
Address get_sp() const { return static_cast<Address>(get_register(sp)); }
// Accessor to the internal simulator stack area.
uintptr_t StackLimit(uintptr_t c_limit) const;
// Executes MIPS instructions until the PC reaches end_sim_pc.
void Execute();
template <typename Return, typename... Args>
Return Call(Address entry, Args... args) {
return VariadicCall<Return>(this, &Simulator::CallImpl, entry, args...);
}
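// Illustrative usage only (the entry address and argument values here are
// hypothetical):
//
//   Simulator* sim = Simulator::current(isolate);
//   int32_t sum = sim->Call<int32_t>(add_entry, 1, 2);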
// Alternative: call a 2-argument double function.
double CallFP(Address entry, double d0, double d1);
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
// Pop an address from the JS stack.
uintptr_t PopAddress();
// Debugger input.
void set_last_debugger_input(char* input);
char* last_debugger_input() { return last_debugger_input_; }
// Redirection support.
static void SetRedirectInstruction(Instruction* instruction);
// ICache checking.
static bool ICacheMatch(void* one, void* two);
static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
size_t size);
// Returns true if pc register contains one of the 'special_values' defined
// below (bad_ra, end_sim_pc).
bool has_bad_pc() const;
private:
enum special_values {
// Known bad pc value to ensure that the simulator does not execute
// without being properly set up.
bad_ra = -1,
// A pc value used to signal the simulator to stop execution. Generally
// the ra is set to this value on transition from native C code to
// simulated execution, so that the simulator can "return" to the native
// C code.
end_sim_pc = -2,
// Unpredictable value.
Unpredictable = 0xbadbeaf
};
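// A minimal sketch of how has_bad_pc() uses these values (the actual
// definition lives in simulator-mips.cc):
//
//   bool Simulator::has_bad_pc() const {
//     return get_pc() == bad_ra || get_pc() == end_sim_pc;
//   }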
V8_EXPORT_PRIVATE intptr_t CallImpl(Address entry, int argument_count,
const intptr_t* arguments);
// Unsupported instructions use Format to print an error and stop execution.
void Format(Instruction* instr, const char* format);
// Helpers for data value tracing.
enum TraceType { BYTE, HALF, WORD, DWORD, FLOAT, DOUBLE, FLOAT_DOUBLE };
// MSA Data Format
enum MSADataFormat { MSA_VECT = 0, MSA_BYTE, MSA_HALF, MSA_WORD, MSA_DWORD };
union msa_reg_t {
int8_t b[kMSALanesByte];
uint8_t ub[kMSALanesByte];
int16_t h[kMSALanesHalf];
uint16_t uh[kMSALanesHalf];
int32_t w[kMSALanesWord];
uint32_t uw[kMSALanesWord];
int64_t d[kMSALanesDword];
uint64_t ud[kMSALanesDword];
};
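// Illustrative: an MSA register is 128 bits wide, so the union above views
// the same 16 bytes as 16 byte lanes, 8 half lanes, 4 word lanes, or 2
// doubleword lanes (i.e. kMSALanesByte/Half/Word/Dword are assumed to be
// 16/8/4/2).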
// Read and write memory.
inline uint32_t ReadBU(int32_t addr);
inline int32_t ReadB(int32_t addr);
inline void WriteB(int32_t addr, uint8_t value);
inline void WriteB(int32_t addr, int8_t value);
inline uint16_t ReadHU(int32_t addr, Instruction* instr);
inline int16_t ReadH(int32_t addr, Instruction* instr);
// Note: Overloaded on the sign of the value.
inline void WriteH(int32_t addr, uint16_t value, Instruction* instr);
inline void WriteH(int32_t addr, int16_t value, Instruction* instr);
inline int ReadW(int32_t addr, Instruction* instr, TraceType t = WORD);
inline void WriteW(int32_t addr, int value, Instruction* instr);
void WriteConditionalW(int32_t addr, int32_t value, Instruction* instr,
int32_t rt_reg);
inline double ReadD(int32_t addr, Instruction* instr);
inline void WriteD(int32_t addr, double value, Instruction* instr);
template <typename T>
T ReadMem(int32_t addr, Instruction* instr);
template <typename T>
void WriteMem(int32_t addr, T value, Instruction* instr);
void TraceRegWr(int32_t value, TraceType t = WORD);
void TraceRegWr(int64_t value, TraceType t = DWORD);
template <typename T>
void TraceMSARegWr(T* value, TraceType t);
template <typename T>
void TraceMSARegWr(T* value);
void TraceMemWr(int32_t addr, int32_t value, TraceType t = WORD);
void TraceMemRd(int32_t addr, int32_t value, TraceType t = WORD);
void TraceMemWr(int32_t addr, int64_t value, TraceType t = DWORD);
void TraceMemRd(int32_t addr, int64_t value, TraceType t = DWORD);
template <typename T>
void TraceMemRd(int32_t addr, T value);
template <typename T>
void TraceMemWr(int32_t addr, T value);
base::EmbeddedVector<char, 128> trace_buf_;
// Operations depending on endianness.
// Get Double Higher / Lower word.
inline int32_t GetDoubleHIW(double* addr);
inline int32_t GetDoubleLOW(double* addr);
// Set Double Higher / Lower word.
inline int32_t SetDoubleHIW(double* addr);
inline int32_t SetDoubleLOW(double* addr);
SimInstruction instr_;
// Executing is handled based on the instruction type.
void DecodeTypeRegister();
// Functions called from DecodeTypeRegister.
void DecodeTypeRegisterCOP1();
void DecodeTypeRegisterCOP1X();
void DecodeTypeRegisterSPECIAL();
void DecodeTypeRegisterSPECIAL2();
void DecodeTypeRegisterSPECIAL3();
// Called from DecodeTypeRegisterCOP1.
void DecodeTypeRegisterSRsType();
void DecodeTypeRegisterDRsType();
void DecodeTypeRegisterWRsType();
void DecodeTypeRegisterLRsType();
int DecodeMsaDataFormat();
void DecodeTypeMsaI8();
void DecodeTypeMsaI5();
void DecodeTypeMsaI10();
void DecodeTypeMsaELM();
void DecodeTypeMsaBIT();
void DecodeTypeMsaMI10();
void DecodeTypeMsa3R();
void DecodeTypeMsa3RF();
void DecodeTypeMsaVec();
void DecodeTypeMsa2R();
void DecodeTypeMsa2RF();
template <typename T>
T MsaI5InstrHelper(uint32_t opcode, T ws, int32_t i5);
template <typename T>
T MsaBitInstrHelper(uint32_t opcode, T wd, T ws, int32_t m);
template <typename T>
T Msa3RInstrHelper(uint32_t opcode, T wd, T ws, T wt);
inline int32_t rs_reg() const { return instr_.RsValue(); }
inline int32_t rs() const { return get_register(rs_reg()); }
inline uint32_t rs_u() const {
return static_cast<uint32_t>(get_register(rs_reg()));
}
inline int32_t rt_reg() const { return instr_.RtValue(); }
inline int32_t rt() const { return get_register(rt_reg()); }
inline uint32_t rt_u() const {
return static_cast<uint32_t>(get_register(rt_reg()));
}
inline int32_t rd_reg() const { return instr_.RdValue(); }
inline int32_t fr_reg() const { return instr_.FrValue(); }
inline int32_t fs_reg() const { return instr_.FsValue(); }
inline int32_t ft_reg() const { return instr_.FtValue(); }
inline int32_t fd_reg() const { return instr_.FdValue(); }
inline int32_t sa() const { return instr_.SaValue(); }
inline int32_t lsa_sa() const { return instr_.LsaSaValue(); }
inline int32_t ws_reg() const { return instr_.WsValue(); }
inline int32_t wt_reg() const { return instr_.WtValue(); }
inline int32_t wd_reg() const { return instr_.WdValue(); }
inline void SetResult(int32_t rd_reg, int32_t alu_out) {
set_register(rd_reg, alu_out);
TraceRegWr(alu_out);
}
inline void SetFPUWordResult(int32_t fd_reg, int32_t alu_out) {
set_fpu_register_word(fd_reg, alu_out);
TraceRegWr(get_fpu_register_word(fd_reg));
}
inline void SetFPUResult(int32_t fd_reg, int64_t alu_out) {
set_fpu_register(fd_reg, alu_out);
TraceRegWr(get_fpu_register(fd_reg));
}
inline void SetFPUFloatResult(int32_t fd_reg, float alu_out) {
set_fpu_register_float(fd_reg, alu_out);
TraceRegWr(get_fpu_register_word(fd_reg), FLOAT);
}
inline void SetFPUDoubleResult(int32_t fd_reg, double alu_out) {
set_fpu_register_double(fd_reg, alu_out);
TraceRegWr(get_fpu_register(fd_reg), DOUBLE);
}
void DecodeTypeImmediate();
void DecodeTypeJump();
// Used for breakpoints and traps.
void SoftwareInterrupt();
// Compact branch guard.
void CheckForbiddenSlot(int32_t current_pc) {
Instruction* instr_after_compact_branch =
reinterpret_cast<Instruction*>(current_pc + kInstrSize);
if (instr_after_compact_branch->IsForbiddenAfterBranch()) {
FATAL(
"Error: Unexpected instruction 0x%08x immediately after a "
"compact branch instruction.",
*reinterpret_cast<uint32_t*>(instr_after_compact_branch));
}
}
// Stop helper functions.
bool IsWatchpoint(uint32_t code);
void PrintWatchpoint(uint32_t code);
void HandleStop(uint32_t code, Instruction* instr);
bool IsStopInstruction(Instruction* instr);
bool IsEnabledStop(uint32_t code);
void EnableStop(uint32_t code);
void DisableStop(uint32_t code);
void IncreaseStopCounter(uint32_t code);
void PrintStopInfo(uint32_t code);
// Executes one instruction.
void InstructionDecode(Instruction* instr);
// Execute one instruction placed in a branch delay slot.
void BranchDelayInstructionDecode(Instruction* instr) {
if (instr->InstructionBits() == nopInstr) {
// Short-cut generic nop instructions. They are always valid and they
// never change the simulator state.
return;
}
if (instr->IsForbiddenInBranchDelay()) {
FATAL("Eror:Unexpected %i opcode in a branch delay slot.",
instr->OpcodeValue());
}
InstructionDecode(instr);
base::SNPrintF(trace_buf_, " ");
}
// ICache.
static void CheckICache(base::CustomMatcherHashMap* i_cache,
Instruction* instr);
static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start,
int size);
static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
void* page);
enum Exception {
none,
kIntegerOverflow,
kIntegerUnderflow,
kDivideByZero,
kNumExceptions
};
// Exceptions.
void SignalException(Exception e);
// Handle arguments and return value for runtime FP functions.
void GetFpArgs(double* x, double* y, int32_t* z);
void SetFpResult(const double& result);
void CallInternal(Address entry);
// Architecture state.
// Registers.
int32_t registers_[kNumSimuRegisters];
// Coprocessor Registers.
// Note: FP32 mode uses only the lower 32-bit part of each element,
// the upper 32 bits are unpredictable.
// Note: the FPUregisters_[] array is increased to 64 * 8B = 32 * 16B in
// order to support MSA registers.
int64_t FPUregisters_[kNumFPURegisters * 2];
// FPU control register.
uint32_t FCSR_;
// MSA control register.
uint32_t MSACSR_;
// Simulator support.
size_t stack_size_;
char* stack_;
bool pc_modified_;
uint64_t icount_;
int break_count_;
// Debugger input.
char* last_debugger_input_;
v8::internal::Isolate* isolate_;
// Registered breakpoints.
Instruction* break_pc_;
Instr break_instr_;
// Stop is disabled if bit 31 is set.
static const uint32_t kStopDisabledBit = 1 << 31;
// A stop is enabled, meaning the simulator will stop when it reaches the
// instruction, if bit 31 of watched_stops_[code].count is unset.
// The value watched_stops_[code].count & ~(1 << 31) indicates how many
// times the breakpoint was hit or stepped through.
struct StopCountAndDesc {
uint32_t count;
char* desc;
};
StopCountAndDesc watched_stops_[kMaxStopCode + 1];
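// Illustrative only: how the encoding above could be read back
// (IsEnabledStop and friends are defined in simulator-mips.cc):
//
//   bool enabled = (watched_stops_[code].count & kStopDisabledBit) == 0;
//   uint32_t hits = watched_stops_[code].count & ~kStopDisabledBit;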
// Synchronization primitives.
enum class MonitorAccess {
Open,
RMW,
};
enum class TransactionSize {
None = 0,
Word = 4,
};
// The least-significant bits of the address are ignored. The number of bits
// is implementation-defined, between 3 and the minimum page size.
static const uintptr_t kExclusiveTaggedAddrMask = ~((1 << 3) - 1);
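// For example, with the low 3 bits ignored, addresses 0x1000 and 0x1004
// tag to the same reservation granule: 0x1004 & ~uintptr_t{7} == 0x1000.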
class LocalMonitor {
public:
LocalMonitor();
// These functions manage the state machine for the local monitor, but do
// not actually perform loads and stores. NotifyStoreConditional only
// returns true if the store conditional is allowed; the global monitor will
// still have to be checked to see whether the memory should be updated.
void NotifyLoad();
void NotifyLoadLinked(uintptr_t addr, TransactionSize size);
void NotifyStore();
bool NotifyStoreConditional(uintptr_t addr, TransactionSize size);
private:
void Clear();
MonitorAccess access_state_;
uintptr_t tagged_addr_;
TransactionSize size_;
};
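// A minimal sketch of the local-monitor state machine, assuming the usual
// LL/SC semantics (the real definitions live in simulator-mips.cc):
//
//   void Simulator::LocalMonitor::NotifyLoadLinked(uintptr_t addr,
//                                                  TransactionSize size) {
//     access_state_ = MonitorAccess::RMW;  // an ll opens an RMW window
//     tagged_addr_ = addr;
//     size_ = size;
//   }
//
//   bool Simulator::LocalMonitor::NotifyStoreConditional(
//       uintptr_t addr, TransactionSize size) {
//     if (access_state_ == MonitorAccess::RMW && addr == tagged_addr_ &&
//         size == size_) {
//       Clear();       // the sc consumes the reservation
//       return true;
//     }
//     return false;    // no (matching) reservation: the sc fails
//   }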
class GlobalMonitor {
public:
class LinkedAddress {
public:
LinkedAddress();
private:
friend class GlobalMonitor;
// These functions manage the state machine for the global monitor, but do
// not actually perform loads and stores.
void Clear_Locked();
void NotifyLoadLinked_Locked(uintptr_t addr);
void NotifyStore_Locked();
bool NotifyStoreConditional_Locked(uintptr_t addr,
bool is_requesting_thread);
MonitorAccess access_state_;
uintptr_t tagged_addr_;
LinkedAddress* next_;
LinkedAddress* prev_;
// A scd can fail due to background cache evictions. Rather than
// simulating this, we'll just occasionally introduce cases where a
// store conditional fails. This will happen once after every
// kMaxFailureCounter exclusive stores.
static const int kMaxFailureCounter = 5;
int failure_counter_;
};
// Exposed so it can be accessed by Simulator::{Read,Write}Ex*.
base::Mutex mutex;
void NotifyLoadLinked_Locked(uintptr_t addr, LinkedAddress* linked_address);
void NotifyStore_Locked(LinkedAddress* linked_address);
bool NotifyStoreConditional_Locked(uintptr_t addr,
LinkedAddress* linked_address);
// Called when the simulator is destroyed.
void RemoveLinkedAddress(LinkedAddress* linked_address);
static GlobalMonitor* Get();
private:
// Private constructor. Call {GlobalMonitor::Get()} to get the singleton.
GlobalMonitor() = default;
friend class base::LeakyObject<GlobalMonitor>;
bool IsProcessorInLinkedList_Locked(LinkedAddress* linked_address) const;
void PrependProcessor_Locked(LinkedAddress* linked_address);
LinkedAddress* head_ = nullptr;
};
LocalMonitor local_monitor_;
GlobalMonitor::LinkedAddress global_monitor_thread_;
};
} // namespace internal
} // namespace v8
#endif // defined(USE_SIMULATOR)
#endif // V8_EXECUTION_MIPS_SIMULATOR_MIPS_H_

@ -146,7 +146,6 @@ class SimulatorBase {
// The following are trapping instructions used for various architectures:
// - V8_TARGET_ARCH_ARM: svc (Supervisor Call)
// - V8_TARGET_ARCH_ARM64: svc (Supervisor Call)
// - V8_TARGET_ARCH_MIPS: swi (software-interrupt)
// - V8_TARGET_ARCH_MIPS64: swi (software-interrupt)
// - V8_TARGET_ARCH_PPC: svc (Supervisor Call)
// - V8_TARGET_ARCH_PPC64: svc (Supervisor Call)

@ -20,8 +20,6 @@
#include "src/execution/arm/simulator-arm.h"
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/execution/ppc/simulator-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/execution/mips/simulator-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/execution/mips64/simulator-mips64.h"
#elif V8_TARGET_ARCH_LOONG64

@ -1777,7 +1777,7 @@ DEFINE_IMPLICATION(allow_natives_for_differential_fuzzing, allow_natives_syntax)
DEFINE_IMPLICATION(allow_natives_for_differential_fuzzing, fuzzing)
DEFINE_BOOL(parse_only, false, "only parse the sources")
// simulator-arm.cc, simulator-arm64.cc and simulator-mips.cc
// simulator-arm.cc and simulator-arm64.cc.
#ifdef USE_SIMULATOR
DEFINE_BOOL(trace_sim, false, "Trace simulator execution")
DEFINE_BOOL(debug_sim, false, "Enable debugging the simulator")
@ -1795,7 +1795,7 @@ DEFINE_INT(sim_stack_alignment, 8,
"Stack alingment in bytes in simulator (4 or 8, 8 is default)")
#endif
DEFINE_INT(sim_stack_size, 2 * MB / KB,
"Stack size of the ARM64, MIPS, MIPS64 and PPC64 simulator "
"Stack size of the ARM64, MIPS64 and PPC64 simulator "
"in kBytes (default is 2 MB)")
DEFINE_BOOL(trace_sim_messages, false,
"Trace simulator debug messages. Implied by --trace-sim.")

@ -1,48 +0,0 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Push all callee-saved registers to get them on the stack for conservative
// stack scanning.
//
// See asm/x64/push_registers_clang.cc for why the function is not generated
// using clang.
//
// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
// GN toolchain (e.g. ChromeOS) and not provide them.
asm(".set noreorder \n"
".global PushAllRegistersAndIterateStack \n"
".type PushAllRegistersAndIterateStack, %function \n"
".hidden PushAllRegistersAndIterateStack \n"
"PushAllRegistersAndIterateStack: \n"
// Push all callee-saved registers and save return address.
" addiu $sp, $sp, -48 \n"
" sw $ra, 44($sp) \n"
" sw $s8, 40($sp) \n"
" sw $sp, 36($sp) \n"
" sw $gp, 32($sp) \n"
" sw $s7, 28($sp) \n"
" sw $s6, 24($sp) \n"
" sw $s5, 20($sp) \n"
" sw $s4, 16($sp) \n"
" sw $s3, 12($sp) \n"
" sw $s2, 8($sp) \n"
" sw $s1, 4($sp) \n"
" sw $s0, 0($sp) \n"
// Maintain frame pointer.
" move $s8, $sp \n"
// Pass 1st parameter (a0) unchanged (Stack*).
// Pass 2nd parameter (a1) unchanged (StackVisitor*).
// Save 3rd parameter (a2; IterateStackCallback).
" move $a3, $a2 \n"
// Call the callback.
" jalr $a3 \n"
// Delay slot: Pass 3rd parameter as sp (stack pointer).
" move $a2, $sp \n"
// Load return address.
" lw $ra, 44($sp) \n"
// Restore frame pointer.
" lw $s8, 40($sp) \n"
" jr $ra \n"
// Delay slot: Discard all callee-saved registers.
" addiu $sp, $sp, 48 \n");

@ -1457,8 +1457,7 @@ void InterpreterAssembler::TraceBytecodeDispatch(TNode<WordT> target_bytecode) {
// static
bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_RISCV64 || \
V8_TARGET_ARCH_RISCV32
#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_RISCV32
return false;
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390 || \
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC || \

@ -422,10 +422,6 @@ void SignalHandler::FillRegisterState(void* context, RegisterState* state) {
state->fp = reinterpret_cast<void*>(mcontext.regs[29]);
// LR is an alias for x30.
state->lr = reinterpret_cast<void*>(mcontext.regs[30]);
#elif V8_HOST_ARCH_MIPS
state->pc = reinterpret_cast<void*>(mcontext.pc);
state->sp = reinterpret_cast<void*>(mcontext.gregs[29]);
state->fp = reinterpret_cast<void*>(mcontext.gregs[30]);
#elif V8_HOST_ARCH_MIPS64
state->pc = reinterpret_cast<void*>(mcontext.pc);
state->sp = reinterpret_cast<void*>(mcontext.gregs[29]);

@ -692,8 +692,6 @@ void LowLevelLogger::LogCodeInfo() {
const char arch[] = "ppc";
#elif V8_TARGET_ARCH_PPC64
const char arch[] = "ppc64";
#elif V8_TARGET_ARCH_MIPS
const char arch[] = "mips";
#elif V8_TARGET_ARCH_LOONG64
const char arch[] = "loong64";
#elif V8_TARGET_ARCH_ARM64

@ -381,11 +381,10 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
#if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64) || \
defined(V8_TARGET_ARCH_MIPS64)
return RelocIterator(*this, kModeMask).done();
#elif defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) || \
defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_IA32) || \
defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_LOONG64) || \
defined(V8_TARGET_ARCH_RISCV32)
#elif defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_S390) || \
defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_RISCV64) || \
defined(V8_TARGET_ARCH_LOONG64) || defined(V8_TARGET_ARCH_RISCV32)
for (RelocIterator it(*this, kModeMask); !it.done(); it.next()) {
// On these platforms we emit relative builtin-to-builtin
// jumps for isolate independent builtins in the snapshot. They are later

@ -106,7 +106,7 @@ bool SimulatorHelper::FillRegisters(Isolate* isolate,
state->sp = reinterpret_cast<void*>(simulator->sp());
state->fp = reinterpret_cast<void*>(simulator->fp());
state->lr = reinterpret_cast<void*>(simulator->lr());
#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_LOONG64
#elif V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_LOONG64
if (!simulator->has_bad_pc()) {
state->pc = reinterpret_cast<void*>(simulator->get_pc());
}

File diff suppressed because it is too large

@ -1,231 +0,0 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_REGEXP_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
#define V8_REGEXP_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
#include "src/codegen/macro-assembler.h"
#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
namespace internal {
class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
: public NativeRegExpMacroAssembler {
public:
RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone, Mode mode,
int registers_to_save);
~RegExpMacroAssemblerMIPS() override;
int stack_limit_slack() override;
void AdvanceCurrentPosition(int by) override;
void AdvanceRegister(int reg, int by) override;
void Backtrack() override;
void Bind(Label* label) override;
void CheckAtStart(int cp_offset, Label* on_at_start) override;
void CheckCharacter(uint32_t c, Label* on_equal) override;
void CheckCharacterAfterAnd(uint32_t c, uint32_t mask,
Label* on_equal) override;
void CheckCharacterGT(base::uc16 limit, Label* on_greater) override;
void CheckCharacterLT(base::uc16 limit, Label* on_less) override;
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
void CheckGreedyLoop(Label* on_tos_equals_current_position) override;
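// For example, the a* in /a*b/ compiles to a greedy loop: each successful
// iteration pushes the current position, and on failure Backtrack pops one
// saved position at a time until the rest of the pattern can match.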
void CheckNotAtStart(int cp_offset, Label* on_not_at_start) override;
void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match) override;
void CheckNotBackReferenceIgnoreCase(int start_reg, bool read_backward,
bool unicode,
Label* on_no_match) override;
void CheckNotCharacter(uint32_t c, Label* on_not_equal) override;
void CheckNotCharacterAfterAnd(uint32_t c, uint32_t mask,
Label* on_not_equal) override;
void CheckNotCharacterAfterMinusAnd(base::uc16 c, base::uc16 minus,
base::uc16 mask,
Label* on_not_equal) override;
void CheckCharacterInRange(base::uc16 from, base::uc16 to,
Label* on_in_range) override;
void CheckCharacterNotInRange(base::uc16 from, base::uc16 to,
Label* on_not_in_range) override;
bool CheckCharacterInRangeArray(const ZoneList<CharacterRange>* ranges,
Label* on_in_range) override;
bool CheckCharacterNotInRangeArray(const ZoneList<CharacterRange>* ranges,
Label* on_not_in_range) override;
void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set) override;
// Checks whether the given offset from the current position is before
// the end of the string.
void CheckPosition(int cp_offset, Label* on_outside_input) override;
bool CheckSpecialCharacterClass(StandardCharacterSet type,
Label* on_no_match) override;
void Fail() override;
Handle<HeapObject> GetCode(Handle<String> source) override;
void GoTo(Label* label) override;
void IfRegisterGE(int reg, int comparand, Label* if_ge) override;
void IfRegisterLT(int reg, int comparand, Label* if_lt) override;
void IfRegisterEqPos(int reg, Label* if_eq) override;
IrregexpImplementation Implementation() override;
void LoadCurrentCharacterUnchecked(int cp_offset,
int character_count) override;
void PopCurrentPosition() override;
void PopRegister(int register_index) override;
void PushBacktrack(Label* label) override;
void PushCurrentPosition() override;
void PushRegister(int register_index,
StackCheckFlag check_stack_limit) override;
void ReadCurrentPositionFromRegister(int reg) override;
void ReadStackPointerFromRegister(int reg) override;
void SetCurrentPositionFromEnd(int by) override;
void SetRegister(int register_index, int to) override;
bool Succeed() override;
void WriteCurrentPositionToRegister(int reg, int cp_offset) override;
void ClearRegisters(int reg_from, int reg_to) override;
void WriteStackPointerToRegister(int reg) override;
bool CanReadUnaligned() const override;
// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
// returning.
// {raw_code} is an Address because this is called via ExternalReference.
static int CheckStackGuardState(Address* return_address, Address raw_code,
Address re_frame);
private:
// Offsets from frame_pointer() of function parameters and stored registers.
static const int kFramePointer = 0;
// Above the frame pointer - Stored registers and stack-passed parameters.
static const int kStoredRegisters = kFramePointer;
// Return address (stored from link register, read into pc on return).
static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
// Stack frame header.
static const int kStackFrameHeader = kReturnAddress;
// Stack parameters placed by caller.
static const int kRegisterOutput = kStackFrameHeader + 20;
static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
static const int kDirectCall = kNumOutputRegisters + kPointerSize;
static const int kIsolate = kDirectCall + kPointerSize;
// Below the frame pointer.
// Register parameters stored by setup code.
static const int kInputEnd = kFramePointer - kPointerSize;
static const int kInputStart = kInputEnd - kPointerSize;
static const int kStartIndex = kInputStart - kPointerSize;
static const int kInputString = kStartIndex - kPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kSuccessfulCaptures = kInputString - kPointerSize;
static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
// Stores the initial value of the regexp stack pointer in a
// position-independent representation (in case the regexp stack grows and
// thus moves).
static const int kRegExpStackBasePointer =
kBacktrackCount - kSystemPointerSize;
// First register address. Following registers are below it on the stack.
static const int kRegisterZero = kRegExpStackBasePointer - kSystemPointerSize;
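// Illustrative offsets, assuming kPointerSize == kSystemPointerSize == 4:
//   kReturnAddress = 0 + 9 * 4 = 36, kRegisterOutput = 36 + 20 = 56,
//   kNumOutputRegisters = 60, kDirectCall = 64, kIsolate = 68; below the
//   frame pointer, kInputEnd = -4 down through kRegisterZero = -36.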
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
void PushCallerSavedRegisters();
void PopCallerSavedRegisters();
// Check whether preemption has been requested.
void CheckPreemption();
// Check whether we are exceeding the stack limit on the backtrack stack.
void CheckStackLimit();
// Generate a call to CheckStackGuardState.
void CallCheckStackGuardState(Register scratch);
void CallIsCharacterInRangeArray(const ZoneList<CharacterRange>* ranges);
// The fp-relative location of a regexp register.
MemOperand register_location(int register_index);
// Register holding the current input position as negative offset from
// the end of the string.
static constexpr Register current_input_offset() { return s2; }
// The register containing the current character after LoadCurrentCharacter.
static constexpr Register current_character() { return s5; }
// Register holding address of the end of the input string.
static constexpr Register end_of_input_address() { return s6; }
// Register holding the frame address. Local variables, parameters and
// regexp registers are addressed relative to this.
static constexpr Register frame_pointer() { return fp; }
// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
static constexpr Register backtrack_stackpointer() { return s7; }
// Register holding pointer to the current code object.
static constexpr Register code_pointer() { return s1; }
// Byte size of chars in the string to match (decided by the Mode argument).
inline int char_size() const { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
// is nullptr, in which case it is a conditional Backtrack.
void BranchOrBacktrack(Label* to,
Condition condition,
Register rs,
const Operand& rt);
// Call and return internally in the generated code in a way that
// is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
inline void SafeCall(Label* to,
Condition cond,
Register rs,
const Operand& rt);
inline void SafeReturn();
inline void SafeCallTarget(Label* name);
// Pushes the value of a register on the backtrack stack. Decrements the
// stack pointer by a word size and stores the register's value there.
inline void Push(Register source);
// Pops a value from the backtrack stack. Reads the word at the stack pointer
// and increments it by a word size.
inline void Pop(Register target);
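// A plausible sketch of Push/Pop (the real definitions live in the .cc
// file; the MIPS macro-assembler mnemonics here are assumed):
//
//   void RegExpMacroAssemblerMIPS::Push(Register source) {
//     __ Addu(backtrack_stackpointer(), backtrack_stackpointer(),
//             Operand(-kPointerSize));          // grow the stack one word
//     __ sw(source, MemOperand(backtrack_stackpointer()));
//   }
//
//   void RegExpMacroAssemblerMIPS::Pop(Register target) {
//     __ lw(target, MemOperand(backtrack_stackpointer()));
//     __ Addu(backtrack_stackpointer(), backtrack_stackpointer(),
//             Operand(kPointerSize));           // shrink it back
//   }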
void LoadRegExpStackPointerFromMemory(Register dst);
void StoreRegExpStackPointerToMemory(Register src, Register scratch);
void PushRegExpBasePointer(Register stack_pointer, Register scratch);
void PopRegExpBasePointer(Register stack_pointer_out, Register scratch);
Isolate* isolate() const { return masm_->isolate(); }
const std::unique_ptr<MacroAssembler> masm_;
const NoRootArrayScope no_root_array_scope_;
// Which mode to generate code for (Latin1 or UC16).
const Mode mode_;
// One greater than the maximal register index actually used.
int num_registers_;
// Number of registers to output at the end (the saved registers
// are always 0..num_saved_registers_-1).
const int num_saved_registers_;
// Labels used internally.
Label entry_label_;
Label start_label_;
Label success_label_;
Label backtrack_label_;
Label exit_label_;
Label check_preempt_label_;
Label stack_overflow_label_;
Label internal_failure_label_;
Label fallback_label_;
};
} // namespace internal
} // namespace v8
#endif // V8_REGEXP_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_

@ -17,8 +17,6 @@
#include "src/regexp/arm/regexp-macro-assembler-arm.h"
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/regexp/ppc/regexp-macro-assembler-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/regexp/mips/regexp-macro-assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/regexp/mips64/regexp-macro-assembler-mips64.h"
#elif V8_TARGET_ARCH_LOONG64

@ -940,9 +940,6 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
macro_assembler.reset(new RegExpMacroAssemblerPPC(isolate, zone, mode,
output_register_count));
#elif V8_TARGET_ARCH_MIPS
macro_assembler.reset(new RegExpMacroAssemblerMIPS(isolate, zone, mode,
output_register_count));
#elif V8_TARGET_ARCH_MIPS64
macro_assembler.reset(new RegExpMacroAssemblerMIPS(isolate, zone, mode,
output_register_count));

@ -20,9 +20,8 @@ namespace v8 {
namespace internal {
// Other platforms have CSA support, see builtins-sharedarraybuffer-gen.h.
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X || \
V8_TARGET_ARCH_LOONG64
#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_PPC || \
V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_LOONG64
namespace {
@ -611,7 +610,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsOr) { UNREACHABLE(); }
RUNTIME_FUNCTION(Runtime_AtomicsXor) { UNREACHABLE(); }
#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64
#endif // V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64
// || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
// || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_LOONG64 ||
// V8_TARGET_ARCH_RISCV32

@ -29,10 +29,10 @@ class Object;
// Used for platforms with embedded constant pools to trigger deserialization
// of objects found in code.
#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_S390) || \
defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_RISCV32) || \
defined(V8_TARGET_ARCH_RISCV64) || V8_EMBEDDED_CONSTANT_POOL
#if defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC) || \
defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_PPC64) || \
defined(V8_TARGET_ARCH_RISCV32) || defined(V8_TARGET_ARCH_RISCV64) || \
V8_EMBEDDED_CONSTANT_POOL
#define V8_CODE_EMBEDS_OBJECT_POINTER 1
#else
#define V8_CODE_EMBEDS_OBJECT_POINTER 0

@ -223,11 +223,10 @@ void FinalizeEmbeddedCodeTargets(Isolate* isolate, EmbeddedData* blob) {
RelocIterator on_heap_it(code, kRelocMask);
RelocIterator off_heap_it(blob, code, kRelocMask);
#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) || \
defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_S390) || \
defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_LOONG64) || \
defined(V8_TARGET_ARCH_RISCV32)
#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_IA32) || \
defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_RISCV64) || \
defined(V8_TARGET_ARCH_LOONG64) || defined(V8_TARGET_ARCH_RISCV32)
// On these platforms we emit relative builtin-to-builtin
// jumps for isolate independent builtins in the snapshot. This fixes up the
// relative jumps to the right offsets in the snapshot.

@ -163,8 +163,7 @@ int PlatformEmbeddedFileWriterGeneric::IndentedDataDirective(
DataDirective PlatformEmbeddedFileWriterGeneric::ByteChunkDataDirective()
const {
#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
defined(V8_TARGET_ARCH_LOONG64)
#if defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_LOONG64)
// MIPS and LOONG64 use a fixed 4-byte instruction set, so .long is used
// to prevent any unnecessary padding.
return kLong;

@ -20,7 +20,7 @@ namespace internal {
using Address = uintptr_t;
// ----------------------------------------------------------------------------
// Generated memcpy/memmove for ia32, arm, and mips.
// Generated memcpy/memmove for ia32 and arm.
void init_memcopy_functions();
@ -59,24 +59,6 @@ V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src,
// For values < 12, the assembler function is slower than the inlined C code.
const int kMinComplexConvertMemCopy = 12;
#elif defined(V8_HOST_ARCH_MIPS)
using MemCopyUint8Function = void (*)(uint8_t* dest, const uint8_t* src,
size_t size);
V8_EXPORT_PRIVATE extern MemCopyUint8Function memcopy_uint8_function;
V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src,
size_t chars) {
memcpy(dest, src, chars);
}
// For values < 16, the assembler function is slower than the inlined C code.
const size_t kMinComplexMemCopy = 16;
V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
(*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
reinterpret_cast<const uint8_t*>(src), size);
}
V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src,
size_t size) {
memmove(dest, src, size);
}
#else
// Copy memory area to disjoint memory area.
inline void MemCopy(void* dest, const void* src, size_t size) {

@ -1869,8 +1869,6 @@ bool CheckCompatibleStackSlotTypes(ValueKind a, ValueKind b);
#include "src/wasm/baseline/arm/liftoff-assembler-arm.h"
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/wasm/baseline/ppc/liftoff-assembler-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/wasm/baseline/mips/liftoff-assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/wasm/baseline/mips64/liftoff-assembler-mips64.h"
#elif V8_TARGET_ARCH_LOONG64

@ -320,8 +320,8 @@ void CheckBailoutAllowed(LiftoffBailoutReason reason, const char* detail,
}
// Some externally maintained architectures don't fully implement Liftoff yet.
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_S390X || \
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_LOONG64
#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_PPC || \
V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_LOONG64
return;
#endif

File diff suppressed because it is too large

@ -246,7 +246,7 @@ void JumpTableAssembler::NopBytes(int bytes) {
}
}
#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
#elif V8_TARGET_ARCH_MIPS64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
int start = pc_offset();

@ -61,15 +61,6 @@ constexpr Register kGpReturnRegisters[] = {x0, x1};
constexpr DoubleRegister kFpParamRegisters[] = {d0, d1, d2, d3, d4, d5, d6, d7};
constexpr DoubleRegister kFpReturnRegisters[] = {d0, d1};
#elif V8_TARGET_ARCH_MIPS
// ===========================================================================
// == mips ===================================================================
// ===========================================================================
constexpr Register kGpParamRegisters[] = {a0, a2, a3};
constexpr Register kGpReturnRegisters[] = {v0, v1};
constexpr DoubleRegister kFpParamRegisters[] = {f2, f4, f6, f8, f10, f12, f14};
constexpr DoubleRegister kFpReturnRegisters[] = {f2, f4};
#elif V8_TARGET_ARCH_MIPS64
// ===========================================================================
// == mips64 =================================================================

@ -380,9 +380,9 @@ void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
writer->WriteVector(code->reloc_info());
writer->WriteVector(code->source_positions());
writer->WriteVector(code->protected_instructions_data());
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM || \
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390X || \
V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_PPC || \
V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_RISCV32 || \
V8_TARGET_ARCH_RISCV64
// On platforms that don't support misaligned word stores, copy to an aligned
// buffer if necessary so we can relocate the serialized code.
std::unique_ptr<byte[]> aligned_buffer;

@ -232,18 +232,6 @@ v8_source_set("cctest_sources") {
"test-assembler-ia32.cc",
"test-log-stack-tracer.cc",
]
} else if (v8_current_cpu == "mips") {
sources += [ ### gcmole(arch:mips) ###
"test-assembler-mips.cc",
"test-disasm-mips.cc",
"test-macro-assembler-mips.cc",
]
} else if (v8_current_cpu == "mipsel") {
sources += [ ### gcmole(arch:mipsel) ###
"test-assembler-mips.cc",
"test-disasm-mips.cc",
"test-macro-assembler-mips.cc",
]
} else if (v8_current_cpu == "mips64") {
sources += [ ### gcmole(arch:mips64) ###
"test-assembler-mips64.cc",
@ -375,8 +363,7 @@ v8_source_set("cctest_sources") {
if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64" ||
v8_current_cpu == "arm" || v8_current_cpu == "arm64" ||
v8_current_cpu == "s390" || v8_current_cpu == "s390x" ||
v8_current_cpu == "mips" || v8_current_cpu == "mips64" ||
v8_current_cpu == "mipsel" || v8_current_cpu == "mipsel64" ||
v8_current_cpu == "mips64" || v8_current_cpu == "mips64el" ||
v8_current_cpu == "riscv64" || v8_current_cpu == "loong64" ||
v8_current_cpu == "riscv32") {
# Disable fmadd/fmsub so that expected results match generated code in

@ -773,7 +773,7 @@ class SimulatorHelper {
state->sp = reinterpret_cast<void*>(simulator_->sp());
state->fp = reinterpret_cast<void*>(simulator_->fp());
state->lr = reinterpret_cast<void*>(simulator_->lr());
#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
#elif V8_TARGET_ARCH_MIPS64
state->pc = reinterpret_cast<void*>(simulator_->get_pc());
state->sp = reinterpret_cast<void*>(
simulator_->get_register(v8::internal::Simulator::sp));

@ -318,28 +318,6 @@
'test-serialize/StartupSerializerTwiceRunScript': [SKIP],
}], # 'arch == arm'
##############################################################################
['arch == mipsel or arch == mips', {
# TODO(mips-team): Improve code-size on large RegExp's.
'test-heap/TestSizeOfRegExpCode': [SKIP],
# BUG(1075): Unresolved crashes on MIPS also.
'test-serialize/StartupSerializerOnce': [SKIP],
'test-serialize/StartupSerializerTwice': [SKIP],
'test-serialize/StartupSerializerOnceRunScript': [SKIP],
'test-serialize/StartupSerializerTwiceRunScript': [SKIP],
}], # 'arch == mipsel or arch == mips'
##############################################################################
['arch == mips', {
# Too slow with TF.
'test-api/ExternalArrays': [PASS, NO_VARIANTS],
# TODO(mips-team): Currently fails on mips board.
'test-api/Threading5': [SKIP],
'test-api/Threading6': [SKIP],
}], # 'arch == mips'
##############################################################################
['arch == mips64', {
# TODO(mips-team): Currently fails on mips64 board.
@ -365,21 +343,21 @@
}], # 'arch == mips64el or arch == mips64'
##############################################################################
['(arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simd_mips', {
['(arch == mips64el or arch == mips64) and not simd_mips', {
# Skip tests that fail on MIPS architectures which don't support SIMD,
# because the lowering mechanism doesn't work properly
'test-run-wasm-simd/RunWasm_ReductionTest4_compiled': [SKIP],
'test-run-wasm-simd/RunWasm_ReductionTest8_compiled': [SKIP],
'test-run-wasm-simd/RunWasm_ReductionTest16_compiled': [SKIP],
'test-run-wasm-simd-liftoff/*': [SKIP],
}], # '(arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simd_mips'
}], # '(arch == mips64el or arch == mips64) and not simd_mips'
['(arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simd_mips', {
['(arch == mips64el or arch == mips64) and not simd_mips', {
'test-gc/RunWasmLiftoff_RefTrivialCasts': [SKIP],
'test-gc/RunWasmTurbofan_RefTrivialCasts': [SKIP],
'test-run-wasm/RunWasmLiftoff_Select_s128_parameters': [SKIP],
'test-run-wasm/RunWasmTurbofan_Select_s128_parameters': [SKIP],
}], # '(arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simd_mips'
}], # '(arch == mips64el or arch == mips64) and not simd_mips'
##############################################################################
['mips_arch_variant == r6', {

@ -4428,8 +4428,7 @@ TEST(RunTruncateFloat32ToInt32) {
if (i < upper_bound && i >= lower_bound) {
CHECK_EQ(static_cast<int32_t>(i), m.Call(i));
} else if (i < lower_bound) {
#if (V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64) && !_MIPS_ARCH_MIPS32R6 && \
!_MIPS_ARCH_MIPS64R6
#if V8_TARGET_ARCH_MIPS64 && !_MIPS_ARCH_MIPS64R6
CHECK_EQ(std::numeric_limits<int32_t>::max(), m.Call(i));
#else
CHECK_EQ(std::numeric_limits<int32_t>::min(), m.Call(i));

@ -17038,8 +17038,7 @@ THREADED_TEST(QuietSignalingNaNs) {
} else {
uint64_t stored_bits = DoubleToBits(stored_number);
// Check if quiet nan (bits 51..62 all set).
#if (defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)) && \
!defined(_MIPS_ARCH_MIPS64R6) && !defined(_MIPS_ARCH_MIPS32R6) && \
#if (defined(V8_TARGET_ARCH_MIPS64)) && !defined(_MIPS_ARCH_MIPS64R6) && \
!defined(USE_SIMULATOR)
// Most significant fraction bit for quiet nan is set to 0
// on MIPS architecture. Allowed by IEEE-754.
@ -17060,8 +17059,7 @@ THREADED_TEST(QuietSignalingNaNs) {
} else {
uint64_t stored_bits = DoubleToBits(stored_date);
// Check if quiet nan (bits 51..62 all set).
#if (defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)) && \
!defined(_MIPS_ARCH_MIPS64R6) && !defined(_MIPS_ARCH_MIPS32R6) && \
#if (defined(V8_TARGET_ARCH_MIPS64)) && !defined(_MIPS_ARCH_MIPS64R6) && \
!defined(USE_SIMULATOR)
// Most significant fraction bit for quiet nan is set to 0
// on MIPS architecture. Allowed by IEEE-754.

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -524,7 +524,7 @@ class SeparateIsolatesLocksNonexclusiveThread : public JoinableThread {
// Run parallel threads that lock and access different isolates in parallel
TEST(SeparateIsolatesLocksNonexclusive) {
i::FLAG_always_turbofan = false;
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_S390
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_S390
const int kNThreads = 50;
#else
const int kNThreads = 100;
@ -609,7 +609,7 @@ class LockerUnlockerThread : public JoinableThread {
// Use unlocker inside of a Locker, multiple threads.
TEST(LockerUnlocker) {
i::FLAG_always_turbofan = false;
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_S390
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_S390
const int kNThreads = 50;
#else
const int kNThreads = 100;
@ -667,7 +667,7 @@ class LockTwiceAndUnlockThread : public JoinableThread {
// Use Unlocker inside two Lockers.
TEST(LockTwiceAndUnlock) {
i::FLAG_always_turbofan = false;
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_S390
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_S390
const int kNThreads = 50;
#else
const int kNThreads = 100;

File diff suppressed because it is too large

@ -133,7 +133,7 @@
##############################################################################
# Tests requiring Sparkplug.
['arch not in (x64, arm64, ia32, arm, mips64el, mipsel, loong64)', {
['arch not in (x64, arm64, ia32, arm, mips64el, loong64)', {
'regress/regress-crbug-1199681': [SKIP],
'debug/regress/regress-crbug-1357554': [SKIP]
}],

@ -69,10 +69,10 @@
}],
################################################################################
['arch == mips64el or arch == mipsel or arch == riscv64 or arch == loong64', {
['arch == mips64el or arch == riscv64 or arch == loong64', {
# Tests that require Simd enabled.
'wasm-trace-memory': [SKIP],
}], # arch == mips64el or arch == mipsel or arch == riscv64 or arch == loong64
}], # arch == mips64el or arch == riscv64 or arch == loong64
##############################################################################
['no_simd_hardware == True', {

@ -157,11 +157,11 @@
'wasm/compare-exchange64-stress': [PASS, SLOW, NO_VARIANTS],
# Very slow on ARM, MIPS, RISCV and LOONG; contains no architecture-dependent code.
'unicode-case-overoptimization0': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips64, mips, riscv64, riscv32, loong64)', SKIP]],
'unicode-case-overoptimization1': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips64, mips, riscv64, riscv32, loong64)', SKIP]],
'regress/regress-3976': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips64, mips, riscv64, riscv32, loong64)', SKIP]],
'regress/regress-crbug-482998': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips, riscv64, riscv32, loong64)', SKIP]],
'regress/regress-740784': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips, riscv64,riscv32, loong64)', SKIP]],
'unicode-case-overoptimization0': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mips64el, mips64, riscv64, riscv32, loong64)', SKIP]],
'unicode-case-overoptimization1': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mips64el, mips64, riscv64, riscv32, loong64)', SKIP]],
'regress/regress-3976': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mips64el, mips64, riscv64, riscv32, loong64)', SKIP]],
'regress/regress-crbug-482998': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mips64el, riscv64, riscv32, loong64)', SKIP]],
'regress/regress-740784': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mips64el, riscv64,riscv32, loong64)', SKIP]],
# TODO(bmeurer): Flaky timeouts (sometimes <1s, sometimes >3m).
'unicodelctest': [PASS, NO_VARIANTS],
@ -256,7 +256,7 @@
##############################################################################
# TODO(ahaas): Port multiple return values to MIPS, S390 and PPC
['arch in (mips, mips64, mipsel, mips64el, s390, s390x, ppc, ppc64)', {
['arch in (mips64, mips64el, s390, s390x, ppc, ppc64)', {
'wasm/multi-value': [SKIP],
}],
@ -480,13 +480,13 @@
##############################################################################
# 32-bit platforms
['arch in (ia32, arm, mips, mipsel, riscv32)', {
['arch in (ia32, arm, riscv32)', {
# Needs >2GB of available contiguous memory.
'wasm/grow-huge-memory': [SKIP],
'wasm/huge-memory': [SKIP],
'wasm/huge-typedarray': [SKIP],
'wasm/bigint-opt': [SKIP],
}], # 'arch in (ia32, arm, mips, mipsel, riscv32)'
}], # 'arch in (ia32, arm, riscv32)'
##############################################################################
['arch == arm64', {
@ -735,60 +735,22 @@
}], # 'arch == arm
##############################################################################
['arch in (mipsel, mips, mips64el, mips64) and not simulator_run', {
['arch in (mips64el, mips64) and not simulator_run', {
# These tests fail occasionally on the buildbots because they consume
# a large amount of memory if executed in parallel. Therefore we
# run only a single instance of these tests.
'regress/regress-crbug-514081': [PASS, NO_VARIANTS],
'regress/regress-599414-array-concat-fast-path': [PASS, NO_VARIANTS],
'array-functions-prototype-misc': [PASS, NO_VARIANTS],
}], # 'arch in (mipsel, mips, mips64el, mips64)'
}], # 'arch in (mips64el, mips64)'
##############################################################################
['arch in (mipsel, mips, mips64el, mips64, ppc, ppc64)', {
['arch in (mips64el, mips64, ppc, ppc64)', {
# These tests fail because qNaN and sNaN values are encoded differently on
# MIPS and ARM/x86 architectures
'wasm/float-constant-folding': [SKIP],
}],
##############################################################################
['arch == mipsel or arch == mips', {
# Slow tests which times out in debug mode.
'try': [PASS, ['mode == debug', SKIP]],
'array-constructor': [PASS, ['mode == debug', SKIP]],
# Slow in release mode on MIPS.
'compiler/regress-stacktrace-methods': [PASS, SLOW],
'array-splice': [PASS, SLOW],
# Long running test.
'string-indexof-2': [PASS, SLOW],
# Long running tests. Skipping because having them timeout takes too long on
# the buildbot.
'compiler/alloc-number': [SKIP],
'regress/regress-490': [SKIP],
'regress/regress-create-exception': [SKIP],
'regress/regress-3247124': [SKIP],
# Requires bigger stack size in the Genesis and if stack size is increased,
# the test requires too much time to run. However, the problem the test
# covers should be platform-independent.
'regress/regress-1132': [SKIP],
# Currently always deopt on minus zero
'math-floor-of-div-minus-zero': [SKIP],
# Requires too much memory on MIPS.
'regress/regress-779407': [SKIP],
'harmony/bigint/regressions': [SKIP],
# Pre-r6 MIPS32 doesn't have instructions needed to properly handle 64-bit
# atomic instructions.
'wasm/atomics64-stress': [PASS, ['mips_arch_variant != r6', SKIP]],
}], # 'arch == mipsel or arch == mips'
##############################################################################
['arch == mips64el or arch == mips64', {
@ -1325,7 +1287,7 @@
##############################################################################
# Skip Liftoff tests on platforms that do not fully implement Liftoff.
['arch not in (x64, ia32, arm64, arm, s390x, ppc64, mipsel, mips64el, loong64)', {
['arch not in (x64, ia32, arm64, arm, s390x, ppc64, mips64el, loong64)', {
'wasm/liftoff': [SKIP],
'wasm/liftoff-debug': [SKIP],
'wasm/tier-up-testing-flag': [SKIP],
@ -1334,7 +1296,7 @@
'wasm/test-partial-serialization': [SKIP],
'regress/wasm/regress-1248024': [SKIP],
'regress/wasm/regress-1251465': [SKIP],
}], # arch not in (x64, ia32, arm64, arm, s390x, ppc64, mipsel, mips64el, loong64)
}], # arch not in (x64, ia32, arm64, arm, s390x, ppc64, mips64el, loong64)
##############################################################################
['system != linux or sandbox == True', {
@ -1448,13 +1410,13 @@
'regress/wasm/regress-9017': [SKIP],
}], # variant == slow_path
['((arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simd_mips) or (arch in [ppc64])', {
['((arch == mips64el or arch == mips64) and not simd_mips) or (arch in [ppc64])', {
# Requires scalar lowering for 64x2 SIMD instructions, which are not
# implemented yet.
# Also skip tests on archs that don't support SIMD and lowering doesn't yet work correctly.
# Condition copied from cctest.status.
'regress/wasm/regress-10831': [SKIP],
}], # ((arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simd_mips) or (arch in [ppc64])
}], # ((arch == mips64el or arch == mips64) and not simd_mips) or (arch in [ppc64])
##############################################################################
['variant == stress_sampling', {
@ -1542,7 +1504,7 @@
##############################################################################
# TODO(v8:11421): Port baseline compiler to other architectures.
['arch not in (x64, arm64, ia32, arm, mips64el, mipsel, riscv64, riscv32, loong64, s390x) or (arch == s390x and pointer_compression)', {
['arch not in (x64, arm64, ia32, arm, mips64el, riscv64, riscv32, loong64, s390x) or (arch == s390x and pointer_compression)', {
'baseline/*': [SKIP],
'regress/regress-1242306': [SKIP],
}],

@ -249,7 +249,7 @@
'ecma/Date/15.9.2.2-5': [PASS, FAIL],
'ecma/Date/15.9.2.2-6': [PASS, FAIL],
# 1026139: These date tests fail on arm and mips.
# 1026139: These date tests fail on arm.
# These date tests also fail in a time zone without daylight saving time.
'ecma/Date/15.9.5.29-1': [PASS, FAIL],
'ecma/Date/15.9.5.28-1': [PASS, FAIL],
@ -270,8 +270,8 @@
'ecma/Date/15.9.5.18': [PASS, ['no_i18n == False', FAIL]],
'ecma/Date/15.9.5.22-1': [PASS, ['no_i18n == False', FAIL]],
# 1050186: Arm/MIPS vm is broken; probably unrelated to dates
'ecma/Date/15.9.5.22-2': [PASS, ['no_i18n == False or arch == arm or arch == mipsel or arch == mips', FAIL]],
# 1050186: Arm vm is broken; probably unrelated to dates
'ecma/Date/15.9.5.22-2': [PASS, ['no_i18n == False or arch == arm', FAIL]],
# Flaky test that fails due to what appears to be a bug in the test.
# Occurs depending on current time
@ -959,7 +959,7 @@
}], # 'arch == arm64'
['arch == mipsel or arch == mips64el or arch == mips64', {
['arch == mips64el or arch == mips64', {
# BUG(3251229): Times out when running new crankshaft test script.
'ecma_3/RegExp/regress-311414': [SKIP],
@ -976,36 +976,12 @@
# BUG(1040): This test might time out.
'js1_5/GC/regress-203278-2': [PASS, SLOW, NO_VARIANTS],
}], # 'arch == mipsel or arch == mips64el or arch == mips64'
['arch == mipsel and simulator_run', {
# Crashes due to C stack overflow.
'js1_5/extensions/regress-355497': [SKIP],
}], # 'arch == mipsel and simulator_run'
}], # 'arch == mips64el or arch == mips64'
['arch == mips64el and simulator_run', {
'js1_5/extensions/regress-355497': [FAIL_OK, '--sim-stack-size=512'],
}], # 'arch == mips64el and simulator_run'
['arch == mips', {
# BUG(3251229): Times out when running new crankshaft test script.
'ecma_3/RegExp/regress-311414': [SKIP],
'ecma/Date/15.9.5.8': [SKIP],
'ecma/Date/15.9.5.10-2': [SKIP],
'ecma/Date/15.9.5.11-2': [SKIP],
'ecma/Date/15.9.5.12-2': [SKIP],
'js1_5/Array/regress-99120-02': [SKIP],
'js1_5/extensions/regress-371636': [SKIP],
'js1_5/Regress/regress-203278-1': [SKIP],
'js1_5/Regress/regress-404755': [SKIP],
'js1_5/Regress/regress-451322': [SKIP],
# BUG(1040): This test might time out.
'js1_5/GC/regress-203278-2': [PASS, SLOW, NO_VARIANTS],
}], # 'arch == mips'
['arch == arm and simulator_run', {
#BUG(3837): Crashes due to C stack overflow.

@ -835,7 +835,7 @@
'staging/Temporal/ZonedDateTime/old/withTimezone': [FAIL],
}], # no_i18n == True
['arch == arm or arch == mipsel or arch == mips or arch == arm64 or arch == mips64 or arch == mips64el', {
['arch == arm or arch == arm64 or arch == mips64 or arch == mips64el', {
# Causes stack overflow on simulators due to eager compilation of
# parenthesized function literals. Needs investigation.
@ -848,7 +848,7 @@
'built-ins/decodeURIComponent/S15.1.3.2_A2.5_T1': [SKIP],
'built-ins/encodeURI/S15.1.3.3_A2.3_T1': [SKIP],
'built-ins/encodeURIComponent/S15.1.3.4_A2.3_T1': [SKIP],
}], # 'arch == arm or arch == mipsel or arch == mips or arch == arm64'
}], # 'arch == arm or arch == arm64'
['byteorder == big', {
# Test failures on big endian platforms due to the way the tests
@ -897,7 +897,7 @@
'*': [SKIP],
}], # variant == no_wasm_traps
['variant != default or arch == arm or arch == arm64 or arch == mipsel or arch == mips or arch == mips64 or arch == mips64el', {
['variant != default or arch == arm or arch == arm64 or arch == mips64 or arch == mips64el', {
# These tests take a long time to run
'built-ins/RegExp/property-escapes/generated/*': [SKIP],
}], # variant != default or arch == arm or arch == arm64

@ -607,11 +607,6 @@ v8_source_set("unittests_sources") {
"assembler/turbo-assembler-ia32-unittest.cc",
"compiler/ia32/instruction-selector-ia32-unittest.cc",
]
} else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") {
sources += [
"assembler/turbo-assembler-mips-unittest.cc",
"compiler/mips/instruction-selector-mips-unittest.cc",
]
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
sources += [
"assembler/disasm-mips64-unittest.cc",

@ -1,66 +0,0 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/codegen/macro-assembler.h"
#include "src/codegen/mips/assembler-mips-inl.h"
#include "src/execution/simulator.h"
#include "test/common/assembler-tester.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest-support.h"
namespace v8 {
namespace internal {
#define __ tasm.
// Test the MIPS assembler by compiling some simple functions into
// a buffer and executing them. These tests do not initialize the
// V8 library, create a context, or use any V8 objects.
class TurboAssemblerTest : public TestWithIsolate {};
TEST_F(TurboAssemblerTest, TestHardAbort) {
auto buffer = AllocateAssemblerBuffer();
TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
__ Abort(AbortReason::kNoReason);
CodeDesc desc;
tasm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
}
TEST_F(TurboAssemblerTest, TestCheck) {
auto buffer = AllocateAssemblerBuffer();
TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
__ set_root_array_available(false);
__ set_abort_hard(true);
// Fail if the first parameter (in {a0}) is 17.
__ Check(Condition::ne, AbortReason::kNoReason, a0, Operand(17));
__ Ret();
CodeDesc desc;
tasm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
f.Call(0);
f.Call(18);
ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, "abort: no reason");
}
#undef __
} // namespace internal
} // namespace v8

Some files were not shown because too many files have changed in this diff