/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef jit_AtomicOperationsGenerated_h
#define jit_AtomicOperationsGenerated_h
/* This file is generated by jit/GenerateAtomicOperations.py. Do not edit! */
#include "mozilla/Attributes.h"
namespace js {
namespace jit {
#define JS_HAVE_GENERATED_ATOMIC_OPS 1
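// Hand-written AArch64 inline-assembly atomics. Sequentially consistent
// ("SeqCst") operations are built from plain loads and stores bracketed by
// "dmb ish" (data memory barrier, inner shareable domain), comparable to
// what std::atomic_thread_fence(std::memory_order_seq_cst) compiles to on
// this architecture. Read-modify-write operations use load-exclusive /
// store-exclusive (ldxr/stxr) retry loops.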
inline void AtomicFenceSeqCst() {
asm volatile ("dmb ish\n\t" ::: "memory");
}
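// SeqCst loads: a single load followed by a trailing "dmb ish". The
// "memory" clobber stops the compiler from moving other memory accesses
// across the asm block.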
inline uint8_t AtomicLoad8SeqCst(const uint8_t* arg) {
uint8_t res;
asm volatile ("ldrb %w[res], [%x[arg]]\n\t"
"dmb ish\n\t"
: [res] "=r" (res)
: [arg] "r" (arg)
: "memory");
return res;
}
inline uint16_t AtomicLoad16SeqCst(const uint16_t* arg) {
uint16_t res;
asm volatile ("ldrh %w[res], [%x[arg]]\n\t"
"dmb ish\n\t"
: [res] "=r" (res)
: [arg] "r" (arg)
: "memory");
return res;
}
inline uint32_t AtomicLoad32SeqCst(const uint32_t* arg) {
uint32_t res;
asm volatile ("ldr %w[res], [%x[arg]]\n\t"
"dmb ish\n\t"
: [res] "=r" (res)
: [arg] "r" (arg)
: "memory");
return res;
}
inline uint64_t AtomicLoad64SeqCst(const uint64_t* arg) {
uint64_t res;
asm volatile ("ldr %x[res], [%x[arg]]\n\t"
"dmb ish\n\t"
: [res] "=r" (res)
: [arg] "r" (arg)
: "memory");
return res;
}
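// Unsynchronized loads: one untorn load per access, with no ordering
// guarantees. The "memory" clobber still keeps the compiler from eliding
// or reordering the access itself.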
inline uint8_t AtomicLoad8Unsynchronized(const uint8_t* arg) {
uint8_t res;
asm volatile ("ldrb %w[res], [%x[arg]]\n\t"
: [res] "=r" (res)
: [arg] "r" (arg)
: "memory");
return res;
}
inline uint16_t AtomicLoad16Unsynchronized(const uint16_t* arg) {
uint16_t res;
asm volatile ("ldrh %w[res], [%x[arg]]\n\t"
: [res] "=r" (res)
: [arg] "r" (arg)
: "memory");
return res;
}
inline uint32_t AtomicLoad32Unsynchronized(const uint32_t* arg) {
uint32_t res;
asm volatile ("ldr %w[res], [%x[arg]]\n\t"
: [res] "=r" (res)
: [arg] "r" (arg)
: "memory");
return res;
}
inline uint64_t AtomicLoad64Unsynchronized(const uint64_t* arg) {
uint64_t res;
asm volatile ("ldr %x[res], [%x[arg]]\n\t"
: [res] "=r" (res)
: [arg] "r" (arg)
: "memory");
return res;
}
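// SeqCst stores: the store is bracketed by "dmb ish" on both sides,
// ordering it against all earlier and later memory accesses.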
inline void AtomicStore8SeqCst(uint8_t* addr, uint8_t val) {
asm volatile ("dmb ish\n\t"
"strb %w[val], [%x[addr]]\n\t"
"dmb ish\n\t"
:
: [addr] "r" (addr), [val] "r"(val)
: "memory");
}
inline void AtomicStore16SeqCst(uint16_t* addr, uint16_t val) {
asm volatile ("dmb ish\n\t"
"strh %w[val], [%x[addr]]\n\t"
"dmb ish\n\t"
:
: [addr] "r" (addr), [val] "r"(val)
: "memory");
}
inline void AtomicStore32SeqCst(uint32_t* addr, uint32_t val) {
asm volatile ("dmb ish\n\t"
"str %w[val], [%x[addr]]\n\t"
"dmb ish\n\t"
:
: [addr] "r" (addr), [val] "r"(val)
: "memory");
}
inline void AtomicStore64SeqCst(uint64_t* addr, uint64_t val) {
asm volatile ("dmb ish\n\t"
"str %x[val], [%x[addr]]\n\t"
"dmb ish\n\t"
:
: [addr] "r" (addr), [val] "r"(val)
: "memory");
}
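// Unsynchronized stores: one untorn store per access, no barriers.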
inline void AtomicStore8Unsynchronized(uint8_t* addr, uint8_t val) {
asm volatile ("strb %w[val], [%x[addr]]\n\t"
:
: [addr] "r" (addr), [val] "r"(val)
: "memory");
}
inline void AtomicStore16Unsynchronized(uint16_t* addr, uint16_t val) {
asm volatile ("strh %w[val], [%x[addr]]\n\t"
:
: [addr] "r" (addr), [val] "r"(val)
: "memory");
}
inline void AtomicStore32Unsynchronized(uint32_t* addr, uint32_t val) {
asm volatile ("str %w[val], [%x[addr]]\n\t"
:
: [addr] "r" (addr), [val] "r"(val)
: "memory");
}
inline void AtomicStore64Unsynchronized(uint64_t* addr, uint64_t val) {
asm volatile ("str %x[val], [%x[addr]]\n\t"
:
: [addr] "r" (addr), [val] "r"(val)
: "memory");
}
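// Atomic exchange via a load-exclusive/store-exclusive loop: ldxr reads
// the old value and arms the exclusive monitor, stxr attempts the store
// and writes 0 to `scratch` on success (nonzero if the reservation was
// lost), and cbnz retries from label 0 on failure. Roughly what
// __atomic_exchange_n(addr, val, __ATOMIC_SEQ_CST) expands to when the
// LSE extension is not used.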
inline uint8_t AtomicExchange8SeqCst(uint8_t* addr, uint8_t val) {
uint8_t res;
uint32_t scratch;
asm volatile ("dmb ish\n\t"
"0:\n\t"
"ldxrb %w[res], [%x[addr]]\n\t"
"stxrb %w[scratch], %w[val], [%x[addr]]\n\t"
"cbnz %w[scratch], 0b\n\t"
"dmb ish\n\t"
: [res] "=&r"(res), [scratch] "=&r"(scratch)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint16_t AtomicExchange16SeqCst(uint16_t* addr, uint16_t val) {
uint16_t res;
uint32_t scratch;
asm volatile ("dmb ish\n\t"
"0:\n\t"
"ldxrh %w[res], [%x[addr]]\n\t"
"stxrh %w[scratch], %w[val], [%x[addr]]\n\t"
"cbnz %w[scratch], 0b\n\t"
"dmb ish\n\t"
: [res] "=&r"(res), [scratch] "=&r"(scratch)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint32_t AtomicExchange32SeqCst(uint32_t* addr, uint32_t val) {
uint32_t res;
uint32_t scratch;
asm volatile ("dmb ish\n\t"
"0:\n\t"
"ldxr %w[res], [%x[addr]]\n\t"
"stxr %w[scratch], %w[val], [%x[addr]]\n\t"
"cbnz %w[scratch], 0b\n\t"
"dmb ish\n\t"
: [res] "=&r"(res), [scratch] "=&r"(scratch)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint64_t AtomicExchange64SeqCst(uint64_t* addr, uint64_t val) {
uint64_t res;
uint32_t scratch;
asm volatile ("dmb ish\n\t"
"0:\n\t"
"ldxr %x[res], [%x[addr]]\n\t"
"stxr %w[scratch], %x[val], [%x[addr]]\n\t"
"cbnz %w[scratch], 0b\n\t"
"dmb ish\n\t"
: [res] "=&r"(res), [scratch] "=&r"(scratch)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
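// Compare-and-exchange: `oldval` is re-extended (uxtb/uxth) or re-copied
// into `scratch` on every iteration because stxr reuses `scratch` for its
// status result. On mismatch the loop exits through label 1; both exit
// paths run the trailing "dmb ish". The return value is the value actually
// observed at `addr`.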
inline uint8_t AtomicCmpXchg8SeqCst(uint8_t* addr,
uint8_t oldval,
uint8_t newval) {
uint8_t res, scratch;
asm volatile ("dmb ish\n\t"
"0:\n\t"
"uxtb %w[scratch], %w[oldval]\n\t"
"ldxrb %w[res], [%x[addr]]\n\t"
"cmp %w[res], %w[scratch]\n\t"
"b.ne 1f\n\t"
"stxrb %w[scratch], %w[newval], [%x[addr]]\n\t"
"cbnz %w[scratch], 0b\n\t"
"1: dmb ish\n\t"
: [res] "=&r" (res), [scratch] "=&r" (scratch)
: [addr] "r" (addr), [oldval] "r"(oldval), [newval] "r" (newval)
: "memory", "cc");
return res;
}
inline uint16_t AtomicCmpXchg16SeqCst(uint16_t* addr,
uint16_t oldval,
uint16_t newval) {
uint16_t res, scratch;
asm volatile ("dmb ish\n\t"
"0:\n\t"
"uxth %w[scratch], %w[oldval]\n\t"
"ldxrh %w[res], [%x[addr]]\n\t"
"cmp %w[res], %w[scratch]\n\t"
"b.ne 1f\n\t"
"stxrh %w[scratch], %w[newval], [%x[addr]]\n\t"
"cbnz %w[scratch], 0b\n\t"
"1: dmb ish\n\t"
: [res] "=&r" (res), [scratch] "=&r" (scratch)
: [addr] "r" (addr), [oldval] "r"(oldval), [newval] "r" (newval)
: "memory", "cc");
return res;
}
inline uint32_t AtomicCmpXchg32SeqCst(uint32_t* addr,
uint32_t oldval,
uint32_t newval) {
uint32_t res, scratch;
asm volatile ("dmb ish\n\t"
"0:\n\t"
"mov %w[scratch], %w[oldval]\n\t"
"ldxr %w[res], [%x[addr]]\n\t"
"cmp %w[res], %w[scratch]\n\t"
"b.ne 1f\n\t"
"stxr %w[scratch], %w[newval], [%x[addr]]\n\t"
"cbnz %w[scratch], 0b\n\t"
"1: dmb ish\n\t"
: [res] "=&r" (res), [scratch] "=&r" (scratch)
: [addr] "r" (addr), [oldval] "r"(oldval), [newval] "r" (newval)
: "memory", "cc");
return res;
}
inline uint64_t AtomicCmpXchg64SeqCst(uint64_t* addr,
uint64_t oldval,
uint64_t newval) {
uint64_t res, scratch;
asm volatile ("dmb ish\n\t"
"0:\n\t"
"mov %x[scratch], %x[oldval]\n\t"
"ldxr %x[res], [%x[addr]]\n\t"
"cmp %x[res], %x[scratch]\n\t"
"b.ne 1f\n\t"
"stxr %w[scratch], %x[newval], [%x[addr]]\n\t"
"cbnz %w[scratch], 0b\n\t"
"1: dmb ish\n\t"
: [res] "=&r" (res), [scratch] "=&r" (scratch)
: [addr] "r" (addr), [oldval] "r"(oldval), [newval] "r" (newval)
: "memory", "cc");
return res;
}
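// Fetch-and-add: the same LL/SC loop with an `add` between the exclusive
// pair. The arithmetic runs on full 64-bit registers; the narrow stxrb /
// stxrh store truncates the result to the operand width. Returns the value
// read before the addition.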
inline uint8_t AtomicAdd8SeqCst(uint8_t* addr, uint8_t val) {
uint8_t res;
uintptr_t scratch1, scratch2;
asm volatile ("dmb ish\n\t"
"0:\n\t"
"ldxrb %w[res], [%x[addr]]\n\t"
"add %x[scratch1], %x[res], %x[val]\n\t"
"stxrb %w[scratch2], %w[scratch1], [%x[addr]]\n\t"
"cbnz %w[scratch2], 0b\n\t"
"dmb ish\n\t"
: [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint16_t AtomicAdd16SeqCst(uint16_t* addr, uint16_t val) {
uint16_t res;
uintptr_t scratch1, scratch2;
asm volatile ("dmb ish\n\t"
"0:\n\t"
"ldxrh %w[res], [%x[addr]]\n\t"
"add %x[scratch1], %x[res], %x[val]\n\t"
"stxrh %w[scratch2], %w[scratch1], [%x[addr]]\n\t"
"cbnz %w[scratch2], 0b\n\t"
"dmb ish\n\t"
: [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint32_t AtomicAdd32SeqCst(uint32_t* addr, uint32_t val) {
uint32_t res;
uintptr_t scratch1, scratch2;
asm volatile ("dmb ish\n\t"
"0:\n\t"
"ldxr %w[res], [%x[addr]]\n\t"
"add %x[scratch1], %x[res], %x[val]\n\t"
"stxr %w[scratch2], %w[scratch1], [%x[addr]]\n\t"
"cbnz %w[scratch2], 0b\n\t"
"dmb ish\n\t"
: [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint64_t AtomicAdd64SeqCst(uint64_t* addr, uint64_t val) {
uint64_t res;
uintptr_t scratch1, scratch2;
asm volatile ("dmb ish\n\t"
"0:\n\t"
"ldxr %x[res], [%x[addr]]\n\t"
"add %x[scratch1], %x[res], %x[val]\n\t"
"stxr %w[scratch2], %x[scratch1], [%x[addr]]\n\t"
"cbnz %w[scratch2], 0b\n\t"
"dmb ish\n\t"
: [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
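// Fetch-and-and, -or, and -xor follow the same LL/SC pattern, differing
// only in the ALU instruction between the exclusive pair (and, orr, eor).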
inline uint8_t AtomicAnd8SeqCst(uint8_t* addr, uint8_t val) {
uint8_t res;
uintptr_t scratch1, scratch2;
asm volatile ("dmb ish\n\t"
"0:\n\t"
"ldxrb %w[res], [%x[addr]]\n\t"
"and %x[scratch1], %x[res], %x[val]\n\t"
"stxrb %w[scratch2], %w[scratch1], [%x[addr]]\n\t"
"cbnz %w[scratch2], 0b\n\t"
"dmb ish\n\t"
: [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint16_t AtomicAnd16SeqCst(uint16_t* addr, uint16_t val) {
uint16_t res;
uintptr_t scratch1, scratch2;
asm volatile ("dmb ish\n\t"
"0:\n\t"
"ldxrh %w[res], [%x[addr]]\n\t"
"and %x[scratch1], %x[res], %x[val]\n\t"
"stxrh %w[scratch2], %w[scratch1], [%x[addr]]\n\t"
"cbnz %w[scratch2], 0b\n\t"
"dmb ish\n\t"
: [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint32_t AtomicAnd32SeqCst(uint32_t* addr, uint32_t val) {
uint32_t res;
uintptr_t scratch1, scratch2;
asm volatile ("dmb ish\n\t"
"0:\n\t"
"ldxr %w[res], [%x[addr]]\n\t"
"and %x[scratch1], %x[res], %x[val]\n\t"
"stxr %w[scratch2], %w[scratch1], [%x[addr]]\n\t"
"cbnz %w[scratch2], 0b\n\t"
"dmb ish\n\t"
: [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint64_t AtomicAnd64SeqCst(uint64_t* addr, uint64_t val) {
uint64_t res;
uintptr_t scratch1, scratch2;
asm volatile ("dmb ish\n\t"
"0:\n\t"
"ldxr %x[res], [%x[addr]]\n\t"
"and %x[scratch1], %x[res], %x[val]\n\t"
"stxr %w[scratch2], %x[scratch1], [%x[addr]]\n\t"
"cbnz %w[scratch2], 0b\n\t"
"dmb ish\n\t"
: [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint8_t AtomicOr8SeqCst(uint8_t* addr, uint8_t val) {
uint8_t res;
uintptr_t scratch1, scratch2;
asm volatile ("dmb ish\n\t"
"0:\n\t"
"ldxrb %w[res], [%x[addr]]\n\t"
"orr %x[scratch1], %x[res], %x[val]\n\t"
"stxrb %w[scratch2], %w[scratch1], [%x[addr]]\n\t"
"cbnz %w[scratch2], 0b\n\t"
"dmb ish\n\t"
: [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint16_t AtomicOr16SeqCst(uint16_t* addr, uint16_t val) {
uint16_t res;
uintptr_t scratch1, scratch2;
asm volatile ("dmb ish\n\t"
"0:\n\t"
"ldxrh %w[res], [%x[addr]]\n\t"
"orr %x[scratch1], %x[res], %x[val]\n\t"
"stxrh %w[scratch2], %w[scratch1], [%x[addr]]\n\t"
"cbnz %w[scratch2], 0b\n\t"
"dmb ish\n\t"
: [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint32_t AtomicOr32SeqCst(uint32_t* addr, uint32_t val) {
uint32_t res;
uintptr_t scratch1, scratch2;
asm volatile ("dmb ish\n\t"
"0:\n\t"
"ldxr %w[res], [%x[addr]]\n\t"
"orr %x[scratch1], %x[res], %x[val]\n\t"
"stxr %w[scratch2], %w[scratch1], [%x[addr]]\n\t"
"cbnz %w[scratch2], 0b\n\t"
"dmb ish\n\t"
: [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint64_t AtomicOr64SeqCst(uint64_t* addr, uint64_t val) {
uint64_t res;
uintptr_t scratch1, scratch2;
asm volatile ("dmb ish\n\t"
"0:\n\t"
"ldxr %x[res], [%x[addr]]\n\t"
"orr %x[scratch1], %x[res], %x[val]\n\t"
"stxr %w[scratch2], %x[scratch1], [%x[addr]]\n\t"
"cbnz %w[scratch2], 0b\n\t"
"dmb ish\n\t"
: [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint8_t AtomicXor8SeqCst(uint8_t* addr, uint8_t val) {
uint8_t res;
uintptr_t scratch1, scratch2;
asm volatile ("dmb ish\n\t"
"0:\n\t"
"ldxrb %w[res], [%x[addr]]\n\t"
"eor %x[scratch1], %x[res], %x[val]\n\t"
"stxrb %w[scratch2], %w[scratch1], [%x[addr]]\n\t"
"cbnz %w[scratch2], 0b\n\t"
"dmb ish\n\t"
: [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint16_t AtomicXor16SeqCst(uint16_t* addr, uint16_t val) {
uint16_t res;
uintptr_t scratch1, scratch2;
asm volatile ("dmb ish\n\t"
"0:\n\t"
"ldxrh %w[res], [%x[addr]]\n\t"
"eor %x[scratch1], %x[res], %x[val]\n\t"
"stxrh %w[scratch2], %w[scratch1], [%x[addr]]\n\t"
"cbnz %w[scratch2], 0b\n\t"
"dmb ish\n\t"
: [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint32_t AtomicXor32SeqCst(uint32_t* addr, uint32_t val) {
uint32_t res;
uintptr_t scratch1, scratch2;
asm volatile ("dmb ish\n\t"
"0:\n\t"
"ldxr %w[res], [%x[addr]]\n\t"
"eor %x[scratch1], %x[res], %x[val]\n\t"
"stxr %w[scratch2], %w[scratch1], [%x[addr]]\n\t"
"cbnz %w[scratch2], 0b\n\t"
"dmb ish\n\t"
: [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint64_t AtomicXor64SeqCst(uint64_t* addr, uint64_t val) {
uint64_t res;
uintptr_t scratch1, scratch2;
asm volatile ("dmb ish\n\t"
"0:\n\t"
"ldxr %x[res], [%x[addr]]\n\t"
"eor %x[scratch1], %x[res], %x[val]\n\t"
"stxr %w[scratch2], %x[scratch1], [%x[addr]]\n\t"
"cbnz %w[scratch2], 0b\n\t"
"dmb ish\n\t"
: [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
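// Spin-wait hint: an instruction synchronization barrier ("isb") briefly
// stalls the pipeline between polling iterations; AArch64's dedicated
// "yield" hint is often a no-op in practice.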
inline void AtomicPause() {
asm volatile ("isb" ::: "memory");
}
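// Unsynchronized copy routines, fully unrolled so that each element moves
// through exactly one load/store pair and is never torn. "Down" variants
// copy from the lowest offset upward, "Up" variants from the highest
// offset downward, letting the caller pick the safe direction for
// overlapping ranges in the manner of memmove. Block routines move 64
// bytes, word routines 8 bytes, and the byte routine a single byte.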
inline void AtomicCopyUnalignedBlockDownUnsynchronized(uint8_t* dst, const uint8_t* src) {
uint8_t* dst_ = reinterpret_cast<uint8_t*>(dst);
const uint8_t* src_ = reinterpret_cast<const uint8_t*>(src);
uint8_t scratch;
asm volatile ("ldrb %w[scratch], [%x[src], 0]\n\t"
"strb %w[scratch], [%x[dst], 0]\n\t"
"ldrb %w[scratch], [%x[src], 1]\n\t"
"strb %w[scratch], [%x[dst], 1]\n\t"
"ldrb %w[scratch], [%x[src], 2]\n\t"
"strb %w[scratch], [%x[dst], 2]\n\t"
"ldrb %w[scratch], [%x[src], 3]\n\t"
"strb %w[scratch], [%x[dst], 3]\n\t"
"ldrb %w[scratch], [%x[src], 4]\n\t"
"strb %w[scratch], [%x[dst], 4]\n\t"
"ldrb %w[scratch], [%x[src], 5]\n\t"
"strb %w[scratch], [%x[dst], 5]\n\t"
"ldrb %w[scratch], [%x[src], 6]\n\t"
"strb %w[scratch], [%x[dst], 6]\n\t"
"ldrb %w[scratch], [%x[src], 7]\n\t"
"strb %w[scratch], [%x[dst], 7]\n\t"
"ldrb %w[scratch], [%x[src], 8]\n\t"
"strb %w[scratch], [%x[dst], 8]\n\t"
"ldrb %w[scratch], [%x[src], 9]\n\t"
"strb %w[scratch], [%x[dst], 9]\n\t"
"ldrb %w[scratch], [%x[src], 10]\n\t"
"strb %w[scratch], [%x[dst], 10]\n\t"
"ldrb %w[scratch], [%x[src], 11]\n\t"
"strb %w[scratch], [%x[dst], 11]\n\t"
"ldrb %w[scratch], [%x[src], 12]\n\t"
"strb %w[scratch], [%x[dst], 12]\n\t"
"ldrb %w[scratch], [%x[src], 13]\n\t"
"strb %w[scratch], [%x[dst], 13]\n\t"
"ldrb %w[scratch], [%x[src], 14]\n\t"
"strb %w[scratch], [%x[dst], 14]\n\t"
"ldrb %w[scratch], [%x[src], 15]\n\t"
"strb %w[scratch], [%x[dst], 15]\n\t"
"ldrb %w[scratch], [%x[src], 16]\n\t"
"strb %w[scratch], [%x[dst], 16]\n\t"
"ldrb %w[scratch], [%x[src], 17]\n\t"
"strb %w[scratch], [%x[dst], 17]\n\t"
"ldrb %w[scratch], [%x[src], 18]\n\t"
"strb %w[scratch], [%x[dst], 18]\n\t"
"ldrb %w[scratch], [%x[src], 19]\n\t"
"strb %w[scratch], [%x[dst], 19]\n\t"
"ldrb %w[scratch], [%x[src], 20]\n\t"
"strb %w[scratch], [%x[dst], 20]\n\t"
"ldrb %w[scratch], [%x[src], 21]\n\t"
"strb %w[scratch], [%x[dst], 21]\n\t"
"ldrb %w[scratch], [%x[src], 22]\n\t"
"strb %w[scratch], [%x[dst], 22]\n\t"
"ldrb %w[scratch], [%x[src], 23]\n\t"
"strb %w[scratch], [%x[dst], 23]\n\t"
"ldrb %w[scratch], [%x[src], 24]\n\t"
"strb %w[scratch], [%x[dst], 24]\n\t"
"ldrb %w[scratch], [%x[src], 25]\n\t"
"strb %w[scratch], [%x[dst], 25]\n\t"
"ldrb %w[scratch], [%x[src], 26]\n\t"
"strb %w[scratch], [%x[dst], 26]\n\t"
"ldrb %w[scratch], [%x[src], 27]\n\t"
"strb %w[scratch], [%x[dst], 27]\n\t"
"ldrb %w[scratch], [%x[src], 28]\n\t"
"strb %w[scratch], [%x[dst], 28]\n\t"
"ldrb %w[scratch], [%x[src], 29]\n\t"
"strb %w[scratch], [%x[dst], 29]\n\t"
"ldrb %w[scratch], [%x[src], 30]\n\t"
"strb %w[scratch], [%x[dst], 30]\n\t"
"ldrb %w[scratch], [%x[src], 31]\n\t"
"strb %w[scratch], [%x[dst], 31]\n\t"
"ldrb %w[scratch], [%x[src], 32]\n\t"
"strb %w[scratch], [%x[dst], 32]\n\t"
"ldrb %w[scratch], [%x[src], 33]\n\t"
"strb %w[scratch], [%x[dst], 33]\n\t"
"ldrb %w[scratch], [%x[src], 34]\n\t"
"strb %w[scratch], [%x[dst], 34]\n\t"
"ldrb %w[scratch], [%x[src], 35]\n\t"
"strb %w[scratch], [%x[dst], 35]\n\t"
"ldrb %w[scratch], [%x[src], 36]\n\t"
"strb %w[scratch], [%x[dst], 36]\n\t"
"ldrb %w[scratch], [%x[src], 37]\n\t"
"strb %w[scratch], [%x[dst], 37]\n\t"
"ldrb %w[scratch], [%x[src], 38]\n\t"
"strb %w[scratch], [%x[dst], 38]\n\t"
"ldrb %w[scratch], [%x[src], 39]\n\t"
"strb %w[scratch], [%x[dst], 39]\n\t"
"ldrb %w[scratch], [%x[src], 40]\n\t"
"strb %w[scratch], [%x[dst], 40]\n\t"
"ldrb %w[scratch], [%x[src], 41]\n\t"
"strb %w[scratch], [%x[dst], 41]\n\t"
"ldrb %w[scratch], [%x[src], 42]\n\t"
"strb %w[scratch], [%x[dst], 42]\n\t"
"ldrb %w[scratch], [%x[src], 43]\n\t"
"strb %w[scratch], [%x[dst], 43]\n\t"
"ldrb %w[scratch], [%x[src], 44]\n\t"
"strb %w[scratch], [%x[dst], 44]\n\t"
"ldrb %w[scratch], [%x[src], 45]\n\t"
"strb %w[scratch], [%x[dst], 45]\n\t"
"ldrb %w[scratch], [%x[src], 46]\n\t"
"strb %w[scratch], [%x[dst], 46]\n\t"
"ldrb %w[scratch], [%x[src], 47]\n\t"
"strb %w[scratch], [%x[dst], 47]\n\t"
"ldrb %w[scratch], [%x[src], 48]\n\t"
"strb %w[scratch], [%x[dst], 48]\n\t"
"ldrb %w[scratch], [%x[src], 49]\n\t"
"strb %w[scratch], [%x[dst], 49]\n\t"
"ldrb %w[scratch], [%x[src], 50]\n\t"
"strb %w[scratch], [%x[dst], 50]\n\t"
"ldrb %w[scratch], [%x[src], 51]\n\t"
"strb %w[scratch], [%x[dst], 51]\n\t"
"ldrb %w[scratch], [%x[src], 52]\n\t"
"strb %w[scratch], [%x[dst], 52]\n\t"
"ldrb %w[scratch], [%x[src], 53]\n\t"
"strb %w[scratch], [%x[dst], 53]\n\t"
"ldrb %w[scratch], [%x[src], 54]\n\t"
"strb %w[scratch], [%x[dst], 54]\n\t"
"ldrb %w[scratch], [%x[src], 55]\n\t"
"strb %w[scratch], [%x[dst], 55]\n\t"
"ldrb %w[scratch], [%x[src], 56]\n\t"
"strb %w[scratch], [%x[dst], 56]\n\t"
"ldrb %w[scratch], [%x[src], 57]\n\t"
"strb %w[scratch], [%x[dst], 57]\n\t"
"ldrb %w[scratch], [%x[src], 58]\n\t"
"strb %w[scratch], [%x[dst], 58]\n\t"
"ldrb %w[scratch], [%x[src], 59]\n\t"
"strb %w[scratch], [%x[dst], 59]\n\t"
"ldrb %w[scratch], [%x[src], 60]\n\t"
"strb %w[scratch], [%x[dst], 60]\n\t"
"ldrb %w[scratch], [%x[src], 61]\n\t"
"strb %w[scratch], [%x[dst], 61]\n\t"
"ldrb %w[scratch], [%x[src], 62]\n\t"
"strb %w[scratch], [%x[dst], 62]\n\t"
"ldrb %w[scratch], [%x[src], 63]\n\t"
"strb %w[scratch], [%x[dst], 63]\n\t"
: [scratch] "=&r" (scratch)
: [dst] "r" (dst_), [src] "r"(src_)
: "memory");
}
inline void AtomicCopyUnalignedBlockUpUnsynchronized(uint8_t* dst, const uint8_t* src) {
uint8_t* dst_ = reinterpret_cast<uint8_t*>(dst);
const uint8_t* src_ = reinterpret_cast<const uint8_t*>(src);
uint8_t scratch;
asm volatile ("ldrb %w[scratch], [%x[src], 63]\n\t"
"strb %w[scratch], [%x[dst], 63]\n\t"
"ldrb %w[scratch], [%x[src], 62]\n\t"
"strb %w[scratch], [%x[dst], 62]\n\t"
"ldrb %w[scratch], [%x[src], 61]\n\t"
"strb %w[scratch], [%x[dst], 61]\n\t"
"ldrb %w[scratch], [%x[src], 60]\n\t"
"strb %w[scratch], [%x[dst], 60]\n\t"
"ldrb %w[scratch], [%x[src], 59]\n\t"
"strb %w[scratch], [%x[dst], 59]\n\t"
"ldrb %w[scratch], [%x[src], 58]\n\t"
"strb %w[scratch], [%x[dst], 58]\n\t"
"ldrb %w[scratch], [%x[src], 57]\n\t"
"strb %w[scratch], [%x[dst], 57]\n\t"
"ldrb %w[scratch], [%x[src], 56]\n\t"
"strb %w[scratch], [%x[dst], 56]\n\t"
"ldrb %w[scratch], [%x[src], 55]\n\t"
"strb %w[scratch], [%x[dst], 55]\n\t"
"ldrb %w[scratch], [%x[src], 54]\n\t"
"strb %w[scratch], [%x[dst], 54]\n\t"
"ldrb %w[scratch], [%x[src], 53]\n\t"
"strb %w[scratch], [%x[dst], 53]\n\t"
"ldrb %w[scratch], [%x[src], 52]\n\t"
"strb %w[scratch], [%x[dst], 52]\n\t"
"ldrb %w[scratch], [%x[src], 51]\n\t"
"strb %w[scratch], [%x[dst], 51]\n\t"
"ldrb %w[scratch], [%x[src], 50]\n\t"
"strb %w[scratch], [%x[dst], 50]\n\t"
"ldrb %w[scratch], [%x[src], 49]\n\t"
"strb %w[scratch], [%x[dst], 49]\n\t"
"ldrb %w[scratch], [%x[src], 48]\n\t"
"strb %w[scratch], [%x[dst], 48]\n\t"
"ldrb %w[scratch], [%x[src], 47]\n\t"
"strb %w[scratch], [%x[dst], 47]\n\t"
"ldrb %w[scratch], [%x[src], 46]\n\t"
"strb %w[scratch], [%x[dst], 46]\n\t"
"ldrb %w[scratch], [%x[src], 45]\n\t"
"strb %w[scratch], [%x[dst], 45]\n\t"
"ldrb %w[scratch], [%x[src], 44]\n\t"
"strb %w[scratch], [%x[dst], 44]\n\t"
"ldrb %w[scratch], [%x[src], 43]\n\t"
"strb %w[scratch], [%x[dst], 43]\n\t"
"ldrb %w[scratch], [%x[src], 42]\n\t"
"strb %w[scratch], [%x[dst], 42]\n\t"
"ldrb %w[scratch], [%x[src], 41]\n\t"
"strb %w[scratch], [%x[dst], 41]\n\t"
"ldrb %w[scratch], [%x[src], 40]\n\t"
"strb %w[scratch], [%x[dst], 40]\n\t"
"ldrb %w[scratch], [%x[src], 39]\n\t"
"strb %w[scratch], [%x[dst], 39]\n\t"
"ldrb %w[scratch], [%x[src], 38]\n\t"
"strb %w[scratch], [%x[dst], 38]\n\t"
"ldrb %w[scratch], [%x[src], 37]\n\t"
"strb %w[scratch], [%x[dst], 37]\n\t"
"ldrb %w[scratch], [%x[src], 36]\n\t"
"strb %w[scratch], [%x[dst], 36]\n\t"
"ldrb %w[scratch], [%x[src], 35]\n\t"
"strb %w[scratch], [%x[dst], 35]\n\t"
"ldrb %w[scratch], [%x[src], 34]\n\t"
"strb %w[scratch], [%x[dst], 34]\n\t"
"ldrb %w[scratch], [%x[src], 33]\n\t"
"strb %w[scratch], [%x[dst], 33]\n\t"
"ldrb %w[scratch], [%x[src], 32]\n\t"
"strb %w[scratch], [%x[dst], 32]\n\t"
"ldrb %w[scratch], [%x[src], 31]\n\t"
"strb %w[scratch], [%x[dst], 31]\n\t"
"ldrb %w[scratch], [%x[src], 30]\n\t"
"strb %w[scratch], [%x[dst], 30]\n\t"
"ldrb %w[scratch], [%x[src], 29]\n\t"
"strb %w[scratch], [%x[dst], 29]\n\t"
"ldrb %w[scratch], [%x[src], 28]\n\t"
"strb %w[scratch], [%x[dst], 28]\n\t"
"ldrb %w[scratch], [%x[src], 27]\n\t"
"strb %w[scratch], [%x[dst], 27]\n\t"
"ldrb %w[scratch], [%x[src], 26]\n\t"
"strb %w[scratch], [%x[dst], 26]\n\t"
"ldrb %w[scratch], [%x[src], 25]\n\t"
"strb %w[scratch], [%x[dst], 25]\n\t"
"ldrb %w[scratch], [%x[src], 24]\n\t"
"strb %w[scratch], [%x[dst], 24]\n\t"
"ldrb %w[scratch], [%x[src], 23]\n\t"
"strb %w[scratch], [%x[dst], 23]\n\t"
"ldrb %w[scratch], [%x[src], 22]\n\t"
"strb %w[scratch], [%x[dst], 22]\n\t"
"ldrb %w[scratch], [%x[src], 21]\n\t"
"strb %w[scratch], [%x[dst], 21]\n\t"
"ldrb %w[scratch], [%x[src], 20]\n\t"
"strb %w[scratch], [%x[dst], 20]\n\t"
"ldrb %w[scratch], [%x[src], 19]\n\t"
"strb %w[scratch], [%x[dst], 19]\n\t"
"ldrb %w[scratch], [%x[src], 18]\n\t"
"strb %w[scratch], [%x[dst], 18]\n\t"
"ldrb %w[scratch], [%x[src], 17]\n\t"
"strb %w[scratch], [%x[dst], 17]\n\t"
"ldrb %w[scratch], [%x[src], 16]\n\t"
"strb %w[scratch], [%x[dst], 16]\n\t"
"ldrb %w[scratch], [%x[src], 15]\n\t"
"strb %w[scratch], [%x[dst], 15]\n\t"
"ldrb %w[scratch], [%x[src], 14]\n\t"
"strb %w[scratch], [%x[dst], 14]\n\t"
"ldrb %w[scratch], [%x[src], 13]\n\t"
"strb %w[scratch], [%x[dst], 13]\n\t"
"ldrb %w[scratch], [%x[src], 12]\n\t"
"strb %w[scratch], [%x[dst], 12]\n\t"
"ldrb %w[scratch], [%x[src], 11]\n\t"
"strb %w[scratch], [%x[dst], 11]\n\t"
"ldrb %w[scratch], [%x[src], 10]\n\t"
"strb %w[scratch], [%x[dst], 10]\n\t"
"ldrb %w[scratch], [%x[src], 9]\n\t"
"strb %w[scratch], [%x[dst], 9]\n\t"
"ldrb %w[scratch], [%x[src], 8]\n\t"
"strb %w[scratch], [%x[dst], 8]\n\t"
"ldrb %w[scratch], [%x[src], 7]\n\t"
"strb %w[scratch], [%x[dst], 7]\n\t"
"ldrb %w[scratch], [%x[src], 6]\n\t"
"strb %w[scratch], [%x[dst], 6]\n\t"
"ldrb %w[scratch], [%x[src], 5]\n\t"
"strb %w[scratch], [%x[dst], 5]\n\t"
"ldrb %w[scratch], [%x[src], 4]\n\t"
"strb %w[scratch], [%x[dst], 4]\n\t"
"ldrb %w[scratch], [%x[src], 3]\n\t"
"strb %w[scratch], [%x[dst], 3]\n\t"
"ldrb %w[scratch], [%x[src], 2]\n\t"
"strb %w[scratch], [%x[dst], 2]\n\t"
"ldrb %w[scratch], [%x[src], 1]\n\t"
"strb %w[scratch], [%x[dst], 1]\n\t"
"ldrb %w[scratch], [%x[src], 0]\n\t"
"strb %w[scratch], [%x[dst], 0]\n\t"
: [scratch] "=&r" (scratch)
: [dst] "r" (dst_), [src] "r"(src_)
: "memory");
}
inline void AtomicCopyUnalignedWordDownUnsynchronized(uint8_t* dst, const uint8_t* src) {
uint8_t* dst_ = reinterpret_cast<uint8_t*>(dst);
const uint8_t* src_ = reinterpret_cast<const uint8_t*>(src);
uint8_t scratch;
asm volatile ("ldrb %w[scratch], [%x[src], 0]\n\t"
"strb %w[scratch], [%x[dst], 0]\n\t"
"ldrb %w[scratch], [%x[src], 1]\n\t"
"strb %w[scratch], [%x[dst], 1]\n\t"
"ldrb %w[scratch], [%x[src], 2]\n\t"
"strb %w[scratch], [%x[dst], 2]\n\t"
"ldrb %w[scratch], [%x[src], 3]\n\t"
"strb %w[scratch], [%x[dst], 3]\n\t"
"ldrb %w[scratch], [%x[src], 4]\n\t"
"strb %w[scratch], [%x[dst], 4]\n\t"
"ldrb %w[scratch], [%x[src], 5]\n\t"
"strb %w[scratch], [%x[dst], 5]\n\t"
"ldrb %w[scratch], [%x[src], 6]\n\t"
"strb %w[scratch], [%x[dst], 6]\n\t"
"ldrb %w[scratch], [%x[src], 7]\n\t"
"strb %w[scratch], [%x[dst], 7]\n\t"
: [scratch] "=&r" (scratch)
: [dst] "r" (dst_), [src] "r"(src_)
: "memory");
}
inline void AtomicCopyUnalignedWordUpUnsynchronized(uint8_t* dst, const uint8_t* src) {
uint8_t* dst_ = reinterpret_cast<uint8_t*>(dst);
const uint8_t* src_ = reinterpret_cast<const uint8_t*>(src);
uint8_t scratch;
asm volatile ("ldrb %w[scratch], [%x[src], 7]\n\t"
"strb %w[scratch], [%x[dst], 7]\n\t"
"ldrb %w[scratch], [%x[src], 6]\n\t"
"strb %w[scratch], [%x[dst], 6]\n\t"
"ldrb %w[scratch], [%x[src], 5]\n\t"
"strb %w[scratch], [%x[dst], 5]\n\t"
"ldrb %w[scratch], [%x[src], 4]\n\t"
"strb %w[scratch], [%x[dst], 4]\n\t"
"ldrb %w[scratch], [%x[src], 3]\n\t"
"strb %w[scratch], [%x[dst], 3]\n\t"
"ldrb %w[scratch], [%x[src], 2]\n\t"
"strb %w[scratch], [%x[dst], 2]\n\t"
"ldrb %w[scratch], [%x[src], 1]\n\t"
"strb %w[scratch], [%x[dst], 1]\n\t"
"ldrb %w[scratch], [%x[src], 0]\n\t"
"strb %w[scratch], [%x[dst], 0]\n\t"
: [scratch] "=&r" (scratch)
: [dst] "r" (dst_), [src] "r"(src_)
: "memory");
}
inline void AtomicCopyBlockDownUnsynchronized(uint8_t* dst, const uint8_t* src) {
uintptr_t* dst_ = reinterpret_cast<uintptr_t*>(dst);
const uintptr_t* src_ = reinterpret_cast<const uintptr_t*>(src);
uintptr_t scratch;
asm volatile ("ldr %x[scratch], [%x[src], 0]\n\t"
"str %x[scratch], [%x[dst], 0]\n\t"
"ldr %x[scratch], [%x[src], 8]\n\t"
"str %x[scratch], [%x[dst], 8]\n\t"
"ldr %x[scratch], [%x[src], 16]\n\t"
"str %x[scratch], [%x[dst], 16]\n\t"
"ldr %x[scratch], [%x[src], 24]\n\t"
"str %x[scratch], [%x[dst], 24]\n\t"
"ldr %x[scratch], [%x[src], 32]\n\t"
"str %x[scratch], [%x[dst], 32]\n\t"
"ldr %x[scratch], [%x[src], 40]\n\t"
"str %x[scratch], [%x[dst], 40]\n\t"
"ldr %x[scratch], [%x[src], 48]\n\t"
"str %x[scratch], [%x[dst], 48]\n\t"
"ldr %x[scratch], [%x[src], 56]\n\t"
"str %x[scratch], [%x[dst], 56]\n\t"
: [scratch] "=&r" (scratch)
: [dst] "r" (dst_), [src] "r"(src_)
: "memory");
}
inline void AtomicCopyBlockUpUnsynchronized(uint8_t* dst, const uint8_t* src) {
uintptr_t* dst_ = reinterpret_cast<uintptr_t*>(dst);
const uintptr_t* src_ = reinterpret_cast<const uintptr_t*>(src);
uintptr_t scratch;
asm volatile ("ldr %x[scratch], [%x[src], 56]\n\t"
"str %x[scratch], [%x[dst], 56]\n\t"
"ldr %x[scratch], [%x[src], 48]\n\t"
"str %x[scratch], [%x[dst], 48]\n\t"
"ldr %x[scratch], [%x[src], 40]\n\t"
"str %x[scratch], [%x[dst], 40]\n\t"
"ldr %x[scratch], [%x[src], 32]\n\t"
"str %x[scratch], [%x[dst], 32]\n\t"
"ldr %x[scratch], [%x[src], 24]\n\t"
"str %x[scratch], [%x[dst], 24]\n\t"
"ldr %x[scratch], [%x[src], 16]\n\t"
"str %x[scratch], [%x[dst], 16]\n\t"
"ldr %x[scratch], [%x[src], 8]\n\t"
"str %x[scratch], [%x[dst], 8]\n\t"
"ldr %x[scratch], [%x[src], 0]\n\t"
"str %x[scratch], [%x[dst], 0]\n\t"
: [scratch] "=&r" (scratch)
: [dst] "r" (dst_), [src] "r"(src_)
: "memory");
}
inline void AtomicCopyWordUnsynchronized(uint8_t* dst, const uint8_t* src) {
uintptr_t* dst_ = reinterpret_cast<uintptr_t*>(dst);
const uintptr_t* src_ = reinterpret_cast<const uintptr_t*>(src);
uintptr_t scratch;
asm volatile ("ldr %x[scratch], [%x[src], 0]\n\t"
"str %x[scratch], [%x[dst], 0]\n\t"
: [scratch] "=&r" (scratch)
: [dst] "r" (dst_), [src] "r"(src_)
: "memory");
}
inline void AtomicCopyByteUnsynchronized(uint8_t* dst, const uint8_t* src) {
uint8_t* dst_ = reinterpret_cast<uint8_t*>(dst);
const uint8_t* src_ = reinterpret_cast<const uint8_t*>(src);
uint8_t scratch;
asm volatile ("ldrb %w[scratch], [%x[src], 0]\n\t"
"strb %w[scratch], [%x[dst], 0]\n\t"
: [scratch] "=&r" (scratch)
: [dst] "r" (dst_), [src] "r"(src_)
: "memory");
}
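// Sizes assumed by the block and word copy routines above.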
constexpr size_t JS_GENERATED_ATOMICS_BLOCKSIZE = 64;
constexpr size_t JS_GENERATED_ATOMICS_WORDSIZE = 8;
} // namespace jit
} // namespace js
#endif // jit_AtomicOperationsGenerated_h