debian/watch.in

# Compulsory line, this is a version 3 file
version=3
https://github.com/v8/v8/tags .*/archive/(__watchBranch__[\d.]*).tar.gz

debian/copyright_hints

Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: FIXME
Upstream-Contact: FIXME
Source: FIXME
Disclaimer: Autogenerated by CDBS

Files: LICENSE LICENSE.v8 Makefile Makefile.android SConstruct benchmarks/base.js benchmarks/regexp.js benchmarks/richards.js benchmarks/run.js benchmarks/spinning-balls/splay-tree.js benchmarks/spinning-balls/v.js benchmarks/splay.js build/android.gypi build/common.gypi build/gyp_v8 build/standalone.gypi debian/samples.gyp include/v8-debug.h include/v8-preparser.h include/v8-profiler.h include/v8-testing.h include/v8.h include/v8stdint.h preparser/SConscript preparser/preparser-process.cc preparser/preparser.gyp samples/SConscript samples/count-hosts.js samples/lineprocessor.cc samples/process.cc samples/samples.gyp samples/shell.cc src/SConscript src/accessors.cc src/accessors.h src/allocation-inl.h src/allocation.cc src/allocation.h src/api.cc src/api.h src/apinatives.js src/apiutils.h src/arguments.h src/arm/builtins-arm.cc src/arm/code-stubs-arm.cc src/arm/code-stubs-arm.h src/arm/codegen-arm.cc src/arm/codegen-arm.h src/arm/constants-arm.cc src/arm/constants-arm.h src/arm/cpu-arm.cc src/arm/debug-arm.cc src/arm/deoptimizer-arm.cc src/arm/disasm-arm.cc src/arm/frames-arm.cc src/arm/frames-arm.h src/arm/full-codegen-arm.cc src/arm/ic-arm.cc src/arm/lithium-arm.cc src/arm/lithium-arm.h src/arm/lithium-codegen-arm.cc src/arm/lithium-codegen-arm.h src/arm/lithium-gap-resolver-arm.cc src/arm/lithium-gap-resolver-arm.h src/arm/macro-assembler-arm.cc src/arm/macro-assembler-arm.h src/arm/regexp-macro-assembler-arm.cc src/arm/regexp-macro-assembler-arm.h src/arm/simulator-arm.cc src/arm/simulator-arm.h src/arm/stub-cache-arm.cc src/array.js src/ast.cc src/ast.h src/atomicops.h src/atomicops_internals_arm_gcc.h src/atomicops_internals_mips_gcc.h src/atomicops_internals_x86_gcc.cc src/atomicops_internals_x86_gcc.h src/atomicops_internals_x86_macosx.h src/atomicops_internals_x86_msvc.h src/bignum-dtoa.cc src/bignum-dtoa.h src/bignum.cc src/bignum.h src/bootstrapper.cc src/bootstrapper.h src/builtins.cc src/builtins.h src/bytecodes-irregexp.h src/cached-powers.cc src/cached-powers.h src/char-predicates.h src/checks.cc src/checks.h src/circular-queue-inl.h src/circular-queue.cc src/circular-queue.h src/code-stubs.cc src/code-stubs.h src/code.h src/codegen.cc src/codegen.h src/collection.js src/compilation-cache.cc src/compilation-cache.h src/compiler-intrinsics.h src/compiler.cc src/compiler.h src/contexts.cc src/contexts.h src/conversions-inl.h src/conversions.cc src/conversions.h src/counters.cc src/counters.h src/cpu-profiler-inl.h src/cpu-profiler.cc src/cpu-profiler.h src/cpu.h src/d8-debug.cc src/d8-debug.h src/d8-posix.cc src/d8-readline.cc src/d8-windows.cc src/d8.cc src/d8.gyp src/d8.h src/d8.js src/data-flow.cc src/data-flow.h src/date.cc src/date.h src/date.js src/dateparser-inl.h src/dateparser.cc src/dateparser.h src/debug-agent.cc src/debug-agent.h src/debug-debugger.js src/debug.cc src/debug.h src/deoptimizer.cc src/deoptimizer.h src/disasm.h src/disassembler.cc src/disassembler.h src/diy-fp.cc src/diy-fp.h src/double.h src/dtoa.cc src/dtoa.h src/elements-kind.cc src/elements-kind.h
src/elements.cc src/elements.h src/execution.cc src/execution.h src/extensions/externalize-string-extension.cc src/extensions/externalize-string-extension.h src/extensions/gc-extension.cc src/extensions/gc-extension.h src/extensions/statistics-extension.cc src/extensions/statistics-extension.h src/factory.cc src/factory.h src/fast-dtoa.cc src/fast-dtoa.h src/fixed-dtoa.cc src/fixed-dtoa.h src/flag-definitions.h src/flags.cc src/flags.h src/frames-inl.h src/frames.cc src/frames.h src/full-codegen.cc src/full-codegen.h src/func-name-inferrer.cc src/func-name-inferrer.h src/gdb-jit.cc src/gdb-jit.h src/global-handles.cc src/global-handles.h src/globals.h src/handles-inl.h src/handles.cc src/handles.h src/hashmap.h src/heap-inl.h src/heap-profiler.cc src/heap-profiler.h src/heap.cc src/heap.h src/hydrogen-instructions.cc src/hydrogen-instructions.h src/hydrogen.h src/ia32/builtins-ia32.cc src/ia32/code-stubs-ia32.cc src/ia32/code-stubs-ia32.h src/ia32/codegen-ia32.cc src/ia32/codegen-ia32.h src/ia32/cpu-ia32.cc src/ia32/debug-ia32.cc src/ia32/deoptimizer-ia32.cc src/ia32/disasm-ia32.cc src/ia32/frames-ia32.cc src/ia32/frames-ia32.h src/ia32/full-codegen-ia32.cc src/ia32/ic-ia32.cc src/ia32/lithium-codegen-ia32.cc src/ia32/lithium-codegen-ia32.h src/ia32/lithium-gap-resolver-ia32.cc src/ia32/lithium-gap-resolver-ia32.h src/ia32/lithium-ia32.cc src/ia32/lithium-ia32.h src/ia32/macro-assembler-ia32.cc src/ia32/macro-assembler-ia32.h src/ia32/regexp-macro-assembler-ia32.cc src/ia32/regexp-macro-assembler-ia32.h src/ia32/simulator-ia32.cc src/ia32/simulator-ia32.h src/ia32/stub-cache-ia32.cc src/ic-inl.h src/ic.cc src/ic.h src/incremental-marking-inl.h src/incremental-marking.cc src/incremental-marking.h src/inspector.cc src/inspector.h src/interface.cc src/interface.h src/interpreter-irregexp.cc src/interpreter-irregexp.h src/isolate-inl.h src/isolate.cc src/isolate.h src/json-parser.h src/json.js src/jsregexp.cc src/jsregexp.h src/lazy-instance.h src/list-inl.h src/list.h src/lithium-allocator-inl.h src/lithium-allocator.cc src/lithium-allocator.h src/lithium.cc src/lithium.h src/liveedit-debugger.js src/liveedit.cc src/liveedit.h src/liveobjectlist-inl.h src/liveobjectlist.cc src/liveobjectlist.h src/log-inl.h src/log-utils.cc src/log-utils.h src/log.cc src/log.h src/macro-assembler.h src/macros.py src/mark-compact-inl.h src/mark-compact.cc src/mark-compact.h src/math.js src/messages.cc src/messages.h src/messages.js src/mips/builtins-mips.cc src/mips/code-stubs-mips.cc src/mips/code-stubs-mips.h src/mips/codegen-mips.cc src/mips/codegen-mips.h src/mips/constants-mips.cc src/mips/constants-mips.h src/mips/cpu-mips.cc src/mips/debug-mips.cc src/mips/deoptimizer-mips.cc src/mips/disasm-mips.cc src/mips/frames-mips.cc src/mips/frames-mips.h src/mips/full-codegen-mips.cc src/mips/ic-mips.cc src/mips/lithium-codegen-mips.cc src/mips/lithium-codegen-mips.h src/mips/lithium-gap-resolver-mips.cc src/mips/lithium-gap-resolver-mips.h src/mips/lithium-mips.cc src/mips/lithium-mips.h src/mips/macro-assembler-mips.cc src/mips/macro-assembler-mips.h src/mips/regexp-macro-assembler-mips.cc src/mips/regexp-macro-assembler-mips.h src/mips/simulator-mips.cc src/mips/simulator-mips.h src/mips/stub-cache-mips.cc src/mirror-debugger.js src/misc-intrinsics.h src/mksnapshot.cc src/natives.h src/objects-debug.cc src/objects-inl.h src/objects-printer.cc src/objects-visiting-inl.h src/objects-visiting.cc src/objects-visiting.h src/objects.cc src/objects.h src/once.cc src/once.h src/optimizing-compiler-thread.cc 
src/optimizing-compiler-thread.h src/parser.cc src/parser.h src/platform-cygwin.cc src/platform-freebsd.cc src/platform-linux.cc src/platform-macos.cc src/platform-nullos.cc src/platform-openbsd.cc src/platform-posix.cc src/platform-posix.h src/platform-solaris.cc src/platform-tls-mac.h src/platform-tls-win32.h src/platform-tls.h src/platform-win32.cc src/platform.h src/preparse-data-format.h src/preparse-data.cc src/preparse-data.h src/preparser-api.cc src/preparser.cc src/preparser.h src/prettyprinter.cc src/prettyprinter.h src/profile-generator-inl.h src/profile-generator.cc src/profile-generator.h src/property-details.h src/property.cc src/property.h src/proxy.js src/regexp-macro-assembler-irregexp-inl.h src/regexp-macro-assembler-irregexp.cc src/regexp-macro-assembler-irregexp.h src/regexp-macro-assembler-tracer.cc src/regexp-macro-assembler-tracer.h src/regexp-macro-assembler.cc src/regexp-macro-assembler.h src/regexp-stack.cc src/regexp-stack.h src/regexp.js src/rewriter.cc src/rewriter.h src/runtime-profiler.cc src/runtime-profiler.h src/runtime.h src/runtime.js src/safepoint-table.cc src/safepoint-table.h src/scanner-character-streams.cc src/scanner-character-streams.h src/scopeinfo.cc src/scopeinfo.h src/scopes.cc src/scopes.h src/serialize.cc src/serialize.h src/simulator.h src/small-pointer-list.h src/smart-pointers.h src/snapshot-common.cc src/snapshot-empty.cc src/snapshot.h src/spaces-inl.h src/spaces.cc src/spaces.h src/splay-tree-inl.h src/splay-tree.h src/store-buffer-inl.h src/store-buffer.cc src/store-buffer.h src/string-search.cc src/string-search.h src/string-stream.cc src/string-stream.h src/string.js src/strtod.cc src/strtod.h src/stub-cache.cc src/stub-cache.h src/token.cc src/token.h src/transitions-inl.h src/transitions.cc src/transitions.h src/type-info.cc src/type-info.h src/unbound-queue-inl.h src/unbound-queue.h src/unicode-inl.h src/unicode.cc src/unicode.h src/uri.js src/utils-inl.h src/utils.cc src/utils.h src/v8-counters.cc src/v8-counters.h src/v8.cc src/v8.h src/v8checks.h src/v8conversions.cc src/v8conversions.h src/v8dll-main.cc src/v8globals.h src/v8memory.h src/v8natives.js src/v8preparserdll-main.cc src/v8threads.cc src/v8threads.h src/v8utils.cc src/v8utils.h src/variables.cc src/variables.h src/version.cc src/version.h src/vm-state-inl.h src/vm-state.h src/win32-headers.h src/win32-math.cc src/win32-math.h src/x64/assembler-x64-inl.h src/x64/assembler-x64.cc src/x64/builtins-x64.cc src/x64/code-stubs-x64.cc src/x64/code-stubs-x64.h src/x64/codegen-x64.cc src/x64/codegen-x64.h src/x64/cpu-x64.cc src/x64/debug-x64.cc src/x64/deoptimizer-x64.cc src/x64/disasm-x64.cc src/x64/frames-x64.cc src/x64/frames-x64.h src/x64/full-codegen-x64.cc src/x64/ic-x64.cc src/x64/lithium-codegen-x64.cc src/x64/lithium-codegen-x64.h src/x64/lithium-gap-resolver-x64.cc src/x64/lithium-gap-resolver-x64.h src/x64/lithium-x64.cc src/x64/lithium-x64.h src/x64/macro-assembler-x64.cc src/x64/macro-assembler-x64.h src/x64/regexp-macro-assembler-x64.cc src/x64/regexp-macro-assembler-x64.h src/x64/simulator-x64.cc src/x64/simulator-x64.h src/x64/stub-cache-x64.cc src/zone-inl.h src/zone.cc src/zone.h test/benchmarks/testcfg.py test/cctest/SConscript test/cctest/cctest.cc test/cctest/cctest.gyp test/cctest/cctest.h test/cctest/cctest.status test/cctest/gay-fixed.cc test/cctest/gay-fixed.h test/cctest/gay-precision.cc test/cctest/gay-precision.h test/cctest/gay-shortest.cc test/cctest/gay-shortest.h test/cctest/log-eq-of-logging-and-traversal.js test/cctest/test-accessors.cc 
test/cctest/test-alloc.cc test/cctest/test-api.cc test/cctest/test-assembler-arm.cc test/cctest/test-assembler-ia32.cc test/cctest/test-assembler-mips.cc test/cctest/test-assembler-x64.cc test/cctest/test-ast.cc test/cctest/test-bignum-dtoa.cc test/cctest/test-bignum.cc test/cctest/test-compiler.cc test/cctest/test-dataflow.cc test/cctest/test-date.cc test/cctest/test-debug.cc test/cctest/test-decls.cc test/cctest/test-deoptimization.cc test/cctest/test-dictionary.cc test/cctest/test-disasm-arm.cc test/cctest/test-disasm-ia32.cc test/cctest/test-disasm-mips.cc test/cctest/test-disasm-x64.cc test/cctest/test-dtoa.cc test/cctest/test-fixed-dtoa.cc test/cctest/test-flags.cc test/cctest/test-func-name-inference.cc test/cctest/test-hashing.cc test/cctest/test-hashmap.cc test/cctest/test-list.cc test/cctest/test-liveedit.cc test/cctest/test-lockers.cc test/cctest/test-log-stack-tracer.cc test/cctest/test-macro-assembler-x64.cc test/cctest/test-mark-compact.cc test/cctest/test-parsing.cc test/cctest/test-random.cc test/cctest/test-regexp.cc test/cctest/test-reloc-info.cc test/cctest/test-serialize.cc test/cctest/test-spaces.cc test/cctest/test-thread-termination.cc test/cctest/test-threads.cc test/cctest/test-utils.cc test/cctest/test-version.cc test/cctest/test-weakmaps.cc test/cctest/testcfg.py test/es5conform/es5conform.status test/es5conform/harness-adapt.js test/es5conform/testcfg.py test/message/message.status test/message/overwritten-builtins.js test/message/overwritten-builtins.out test/message/regress/regress-1527.js test/message/regress/regress-1527.out test/message/regress/regress-73.js test/message/regress/regress-73.out test/message/regress/regress-75.js test/message/regress/regress-75.out test/message/replacement-marker-as-argument.js test/message/replacement-marker-as-argument.out test/message/simple-throw.js test/message/simple-throw.out test/message/testcfg.py test/message/try-catch-finally-no-message.js test/message/try-catch-finally-no-message.out test/message/try-catch-finally-return-in-finally.js test/message/try-catch-finally-return-in-finally.out test/message/try-catch-finally-throw-in-catch-and-finally.js test/message/try-catch-finally-throw-in-catch-and-finally.out test/message/try-catch-finally-throw-in-catch.js test/message/try-catch-finally-throw-in-catch.out test/message/try-catch-finally-throw-in-finally.js test/message/try-catch-finally-throw-in-finally.out test/message/try-finally-return-in-finally.js test/message/try-finally-return-in-finally.out test/message/try-finally-throw-in-finally.js test/message/try-finally-throw-in-finally.out test/message/try-finally-throw-in-try-and-finally.js test/message/try-finally-throw-in-try-and-finally.out test/message/try-finally-throw-in-try.js test/message/try-finally-throw-in-try.out test/mjsunit/accessor-map-sharing.js test/mjsunit/accessors-on-global-object.js test/mjsunit/api-call-after-bypassed-exception.js test/mjsunit/apply-arguments-gc-safepoint.js test/mjsunit/apply.js test/mjsunit/argument-assigned.js test/mjsunit/argument-named-arguments.js test/mjsunit/arguments-apply.js test/mjsunit/arguments-call-apply.js test/mjsunit/arguments-enum.js test/mjsunit/arguments-escape.js test/mjsunit/arguments-indirect.js test/mjsunit/arguments-lazy.js test/mjsunit/arguments-load-across-eval.js test/mjsunit/arguments-opt.js test/mjsunit/arguments-read-and-assignment.js test/mjsunit/arguments.js test/mjsunit/array-bounds-check-removal.js test/mjsunit/array-concat.js test/mjsunit/array-construct-transition.js 
test/mjsunit/array-constructor.js test/mjsunit/array-elements-from-array-prototype-chain.js test/mjsunit/array-elements-from-array-prototype.js test/mjsunit/array-elements-from-object-prototype.js test/mjsunit/array-functions-prototype-misc.js test/mjsunit/array-functions-prototype.js test/mjsunit/array-indexing.js test/mjsunit/array-iteration.js test/mjsunit/array-join.js test/mjsunit/array-length-number-conversion.js test/mjsunit/array-length.js test/mjsunit/array-literal-transitions.js test/mjsunit/array-pop.js test/mjsunit/array-push.js test/mjsunit/array-reduce.js test/mjsunit/array-shift.js test/mjsunit/array-slice.js test/mjsunit/array-sort.js test/mjsunit/array-splice.js test/mjsunit/array-store-and-grow.js test/mjsunit/array-tostring.js test/mjsunit/array-unshift.js test/mjsunit/ascii-regexp-subject.js test/mjsunit/assert-opt-and-deopt.js test/mjsunit/big-array-literal.js test/mjsunit/big-object-literal.js test/mjsunit/binary-op-newspace.js test/mjsunit/binary-operation-overwrite.js test/mjsunit/bit-not.js test/mjsunit/bitops-info.js test/mjsunit/bitwise-operations-undefined.js test/mjsunit/body-not-visible.js test/mjsunit/boolean.js test/mjsunit/break.js test/mjsunit/bugs/618.js test/mjsunit/bugs/bug-1344252.js test/mjsunit/bugs/bug-222.js test/mjsunit/bugs/bug-2337.js test/mjsunit/bugs/bug-617.js test/mjsunit/bugs/bug-618.js test/mjsunit/bugs/bug-941049.js test/mjsunit/bugs/harmony/debug-blockscopes.js test/mjsunit/builtins.js test/mjsunit/call-non-function-call.js test/mjsunit/call-non-function.js test/mjsunit/call-stub.js test/mjsunit/call.js test/mjsunit/char-escape.js test/mjsunit/class-of-builtins.js test/mjsunit/closure.js test/mjsunit/closures.js test/mjsunit/codegen-coverage.js test/mjsunit/compare-character.js test/mjsunit/compare-known-objects-slow.js test/mjsunit/compare-nan.js test/mjsunit/comparison-ops-and-undefined.js test/mjsunit/compiler/alloc-number.js test/mjsunit/compiler/alloc-object-huge.js test/mjsunit/compiler/alloc-object.js test/mjsunit/compiler/array-access.js test/mjsunit/compiler/array-length.js test/mjsunit/compiler/assignment-deopt.js test/mjsunit/compiler/assignment.js test/mjsunit/compiler/binary-ops.js test/mjsunit/compiler/call-keyed.js test/mjsunit/compiler/compare.js test/mjsunit/compiler/complex-for-in.js test/mjsunit/compiler/control-flow-0.js test/mjsunit/compiler/control-flow-1.js test/mjsunit/compiler/control-flow-2.js test/mjsunit/compiler/count-deopt.js test/mjsunit/compiler/countoperation.js test/mjsunit/compiler/delete.js test/mjsunit/compiler/deopt-args.js test/mjsunit/compiler/deopt-inlined-smi.js test/mjsunit/compiler/eval-introduced-closure.js test/mjsunit/compiler/expression-trees.js test/mjsunit/compiler/for-stmt.js test/mjsunit/compiler/function-call.js test/mjsunit/compiler/global-accessors.js test/mjsunit/compiler/globals.js test/mjsunit/compiler/inline-accessors.js test/mjsunit/compiler/inline-arguments.js test/mjsunit/compiler/inline-arity-mismatch.js test/mjsunit/compiler/inline-compare.js test/mjsunit/compiler/inline-conditional.js test/mjsunit/compiler/inline-construct.js test/mjsunit/compiler/inline-context-slots.js test/mjsunit/compiler/inline-global-access.js test/mjsunit/compiler/inline-literals.js test/mjsunit/compiler/inline-param.js test/mjsunit/compiler/inline-throw.js test/mjsunit/compiler/inline-two.js test/mjsunit/compiler/jsnatives.js test/mjsunit/compiler/lazy-const-lookup.js test/mjsunit/compiler/literals-assignment.js test/mjsunit/compiler/literals-optimized.js test/mjsunit/compiler/literals.js 
test/mjsunit/compiler/logical-and.js test/mjsunit/compiler/logical-or.js test/mjsunit/compiler/loopcount.js test/mjsunit/compiler/loops.js test/mjsunit/compiler/math-floor-global.js test/mjsunit/compiler/math-floor-local.js test/mjsunit/compiler/null-compare.js test/mjsunit/compiler/objectliterals.js test/mjsunit/compiler/optimize-bitnot.js test/mjsunit/compiler/optimized-closures.js test/mjsunit/compiler/optimized-for-in.js test/mjsunit/compiler/optimized-function-calls.js test/mjsunit/compiler/pic.js test/mjsunit/compiler/property-calls.js test/mjsunit/compiler/property-refs.js test/mjsunit/compiler/property-simple.js test/mjsunit/compiler/property-stores.js test/mjsunit/compiler/recursive-deopt.js test/mjsunit/compiler/regress-0.js test/mjsunit/compiler/regress-1.js test/mjsunit/compiler/regress-106351.js test/mjsunit/compiler/regress-1085.js test/mjsunit/compiler/regress-1394.js test/mjsunit/compiler/regress-2.js test/mjsunit/compiler/regress-3.js test/mjsunit/compiler/regress-3136962.js test/mjsunit/compiler/regress-3185901.js test/mjsunit/compiler/regress-3218915.js test/mjsunit/compiler/regress-3249650.js test/mjsunit/compiler/regress-3260426.js test/mjsunit/compiler/regress-4.js test/mjsunit/compiler/regress-5.js test/mjsunit/compiler/regress-6.js test/mjsunit/compiler/regress-7.js test/mjsunit/compiler/regress-8.js test/mjsunit/compiler/regress-96989.js test/mjsunit/compiler/regress-arguments.js test/mjsunit/compiler/regress-arrayliteral.js test/mjsunit/compiler/regress-closures-with-eval.js test/mjsunit/compiler/regress-const.js test/mjsunit/compiler/regress-deopt-call-as-function.js test/mjsunit/compiler/regress-funarguments.js test/mjsunit/compiler/regress-funcaller.js test/mjsunit/compiler/regress-gap.js test/mjsunit/compiler/regress-gvn.js test/mjsunit/compiler/regress-inline-callfunctionstub.js test/mjsunit/compiler/regress-intoverflow.js test/mjsunit/compiler/regress-lazy-deopt.js test/mjsunit/compiler/regress-lbranch-double.js test/mjsunit/compiler/regress-loadfield.js test/mjsunit/compiler/regress-loop-deopt.js test/mjsunit/compiler/regress-max-locals-for-osr.js test/mjsunit/compiler/regress-max.js test/mjsunit/compiler/regress-or.js test/mjsunit/compiler/regress-rep-change.js test/mjsunit/compiler/regress-serialized-slots.js test/mjsunit/compiler/regress-stacktrace-methods.js test/mjsunit/compiler/regress-stacktrace.js test/mjsunit/compiler/regress-toint32.js test/mjsunit/compiler/regress-valueof.js test/mjsunit/compiler/safepoint.js test/mjsunit/compiler/short-circuit.js test/mjsunit/compiler/simple-bailouts.js test/mjsunit/compiler/simple-binary-op.js test/mjsunit/compiler/simple-deopt.js test/mjsunit/compiler/simple-global-access.js test/mjsunit/compiler/simple-inlining.js test/mjsunit/compiler/simple-osr.js test/mjsunit/compiler/strict-recompile.js test/mjsunit/compiler/switch-bailout.js test/mjsunit/compiler/this-property-refs.js test/mjsunit/compiler/thisfunction.js test/mjsunit/compiler/uint32.js test/mjsunit/compiler/unary-add.js test/mjsunit/compiler/variables.js test/mjsunit/const-declaration.js test/mjsunit/const-eval-init.js test/mjsunit/const-redecl.js test/mjsunit/const.js test/mjsunit/constant-folding.js test/mjsunit/context-variable-assignments.js test/mjsunit/copy-on-write-assert.js test/mjsunit/count-based-osr.js test/mjsunit/cyclic-array-to-string.js test/mjsunit/cyrillic.js test/mjsunit/d8-os.js test/mjsunit/date-parse.js test/mjsunit/date.js test/mjsunit/debug-backtrace-text.js test/mjsunit/debug-backtrace.js test/mjsunit/debug-break-inline.js 
test/mjsunit/debug-breakpoints.js test/mjsunit/debug-changebreakpoint.js test/mjsunit/debug-clearbreakpoint.js test/mjsunit/debug-clearbreakpointgroup.js test/mjsunit/debug-compile-event-newfunction.js test/mjsunit/debug-compile-event.js test/mjsunit/debug-conditional-breakpoints.js test/mjsunit/debug-constructed-by.js test/mjsunit/debug-constructor.js test/mjsunit/debug-continue.js test/mjsunit/debug-enable-disable-breakpoints.js test/mjsunit/debug-evaluate-arguments.js test/mjsunit/debug-evaluate-bool-constructor.js test/mjsunit/debug-evaluate-locals-optimized-double.js test/mjsunit/debug-evaluate-locals-optimized.js test/mjsunit/debug-evaluate-locals.js test/mjsunit/debug-evaluate-recursive.js test/mjsunit/debug-evaluate-with-context.js test/mjsunit/debug-evaluate-with.js test/mjsunit/debug-evaluate.js test/mjsunit/debug-event-listener.js test/mjsunit/debug-function-scopes.js test/mjsunit/debug-handle.js test/mjsunit/debug-ignore-breakpoints.js test/mjsunit/debug-listbreakpoints.js test/mjsunit/debug-liveedit-1.js test/mjsunit/debug-liveedit-2.js test/mjsunit/debug-liveedit-3.js test/mjsunit/debug-liveedit-breakpoints.js test/mjsunit/debug-liveedit-check-stack.js test/mjsunit/debug-liveedit-diff.js test/mjsunit/debug-liveedit-double-call.js test/mjsunit/debug-liveedit-newsource.js test/mjsunit/debug-liveedit-patch-positions-replace.js test/mjsunit/debug-liveedit-patch-positions.js test/mjsunit/debug-liveedit-restart-frame.js test/mjsunit/debug-liveedit-stack-padding.js test/mjsunit/debug-liveedit-utils.js test/mjsunit/debug-mirror-cache.js test/mjsunit/debug-multiple-breakpoints.js test/mjsunit/debug-receiver.js test/mjsunit/debug-referenced-by.js test/mjsunit/debug-references.js test/mjsunit/debug-return-value.js test/mjsunit/debug-scopes.js test/mjsunit/debug-script-breakpoints-closure.js test/mjsunit/debug-script-breakpoints-nested.js test/mjsunit/debug-script-breakpoints.js test/mjsunit/debug-script.js test/mjsunit/debug-scripts-request.js test/mjsunit/debug-set-script-source.js test/mjsunit/debug-setbreakpoint.js test/mjsunit/debug-setexceptionbreak.js test/mjsunit/debug-sourceinfo.js test/mjsunit/debug-sourceslice.js test/mjsunit/debug-step-2.js test/mjsunit/debug-step-3.js test/mjsunit/debug-step-stub-callfunction.js test/mjsunit/debug-step.js test/mjsunit/debug-stepin-accessor.js test/mjsunit/debug-stepin-builtin-callback.js test/mjsunit/debug-stepin-builtin.js test/mjsunit/debug-stepin-call-function-stub.js test/mjsunit/debug-stepin-constructor.js test/mjsunit/debug-stepin-function-call.js test/mjsunit/debug-stepnext-do-while.js test/mjsunit/debug-stepout-recursive-function.js test/mjsunit/debug-stepout-scope-part1.js test/mjsunit/debug-stepout-scope-part2.js test/mjsunit/debug-stepout-scope-part3.js test/mjsunit/debug-stepout-scope-part4.js test/mjsunit/debug-stepout-scope-part5.js test/mjsunit/debug-stepout-scope-part6.js test/mjsunit/debug-stepout-scope-part7.js test/mjsunit/debug-stepout-scope-part8.js test/mjsunit/debug-stepout-to-builtin.js test/mjsunit/debug-suspend.js test/mjsunit/debug-version.js test/mjsunit/declare-locally.js test/mjsunit/deep-recursion.js test/mjsunit/define-property-gc.js test/mjsunit/delay-syntax-error.js test/mjsunit/delete-global-properties.js test/mjsunit/delete-in-eval.js test/mjsunit/delete-in-with.js test/mjsunit/delete-non-configurable.js test/mjsunit/delete-vars-from-eval.js test/mjsunit/delete.js test/mjsunit/deopt-minus-zero.js test/mjsunit/div-mod.js test/mjsunit/do-not-strip-fc.js test/mjsunit/dont-enum-array-holes.js 
test/mjsunit/dont-reinit-global-var.js test/mjsunit/double-equals.js test/mjsunit/dtoa.js test/mjsunit/elements-kind-depends.js test/mjsunit/elements-kind.js test/mjsunit/elements-transition-hoisting.js test/mjsunit/elements-transition.js test/mjsunit/enumeration-order.js test/mjsunit/error-constructors.js test/mjsunit/error-tostring.js test/mjsunit/escape.js test/mjsunit/eval-enclosing-function-name.js test/mjsunit/eval-stack-trace.js test/mjsunit/eval-typeof-non-existing.js test/mjsunit/eval.js test/mjsunit/external-array.js test/mjsunit/extra-arguments.js test/mjsunit/extra-commas.js test/mjsunit/fast-array-length.js test/mjsunit/fast-element-smi-check.js test/mjsunit/fast-non-keyed.js test/mjsunit/fast-prototype.js test/mjsunit/for-in-delete.js test/mjsunit/for-in-null-or-undefined.js test/mjsunit/for-in-special-cases.js test/mjsunit/for-in.js test/mjsunit/for.js test/mjsunit/fun-as-prototype.js test/mjsunit/fun-name.js test/mjsunit/function-arguments-null.js test/mjsunit/function-bind.js test/mjsunit/function-call.js test/mjsunit/function-caller.js test/mjsunit/function-named-self-reference.js test/mjsunit/function-names.js test/mjsunit/function-property.js test/mjsunit/function-prototype.js test/mjsunit/function-source.js test/mjsunit/function-without-prototype.js test/mjsunit/function.js test/mjsunit/fuzz-accessors.js test/mjsunit/fuzz-natives-part1.js test/mjsunit/fuzz-natives-part2.js test/mjsunit/fuzz-natives-part3.js test/mjsunit/fuzz-natives-part4.js test/mjsunit/get-own-property-descriptor.js test/mjsunit/get-prototype-of.js test/mjsunit/getter-in-prototype.js test/mjsunit/getter-in-value-prototype.js test/mjsunit/global-accessors.js test/mjsunit/global-const-var-conflicts.js test/mjsunit/global-deleted-property-ic.js test/mjsunit/global-deleted-property-keyed.js test/mjsunit/global-ic.js test/mjsunit/global-load-from-eval-in-with.js test/mjsunit/global-load-from-eval.js test/mjsunit/global-load-from-nested-eval.js test/mjsunit/global-vars-eval.js test/mjsunit/global-vars-with.js test/mjsunit/greedy.js test/mjsunit/harmony/block-conflicts.js test/mjsunit/harmony/block-const-assign.js test/mjsunit/harmony/block-early-errors.js test/mjsunit/harmony/block-for.js test/mjsunit/harmony/block-lazy-compile.js test/mjsunit/harmony/block-leave.js test/mjsunit/harmony/block-let-crankshaft.js test/mjsunit/harmony/block-let-declaration.js test/mjsunit/harmony/block-let-semantics.js test/mjsunit/harmony/block-scoping.js test/mjsunit/harmony/collections.js test/mjsunit/harmony/debug-blockscopes.js test/mjsunit/harmony/debug-evaluate-blockscopes.js test/mjsunit/harmony/debug-function-scopes.js test/mjsunit/harmony/module-linking.js test/mjsunit/harmony/module-parsing.js test/mjsunit/harmony/module-recompile.js test/mjsunit/harmony/module-resolution.js test/mjsunit/harmony/proxies-example-membrane.js test/mjsunit/harmony/proxies-for.js test/mjsunit/harmony/proxies-function.js test/mjsunit/harmony/proxies-hash.js test/mjsunit/harmony/proxies.js test/mjsunit/harmony/typeof.js test/mjsunit/has-own-property.js test/mjsunit/hex-parsing.js test/mjsunit/html-comments.js test/mjsunit/html-string-funcs.js test/mjsunit/if-in-undefined.js test/mjsunit/in.js test/mjsunit/indexed-accessors.js test/mjsunit/indexed-value-properties.js test/mjsunit/instanceof.js test/mjsunit/int32-ops.js test/mjsunit/integer-to-string.js test/mjsunit/invalid-lhs.js test/mjsunit/invalid-source-element.js test/mjsunit/json.js test/mjsunit/keyed-call-generic.js test/mjsunit/keyed-call-ic.js test/mjsunit/keyed-ic.js 
test/mjsunit/keyed-storage-extend.js test/mjsunit/keywords-and-reserved_words.js test/mjsunit/large-object-allocation.js test/mjsunit/large-object-literal.js test/mjsunit/lazy-load.js test/mjsunit/leakcheck.js test/mjsunit/length.js test/mjsunit/limit-locals.js test/mjsunit/local-load-from-eval.js test/mjsunit/logical.js test/mjsunit/math-abs.js test/mjsunit/math-floor-negative.js test/mjsunit/math-floor-of-div-minus-zero.js test/mjsunit/math-floor-of-div.js test/mjsunit/math-floor-part1.js test/mjsunit/math-floor-part2.js test/mjsunit/math-floor-part3.js test/mjsunit/math-floor-part4.js test/mjsunit/math-min-max.js test/mjsunit/math-pow.js test/mjsunit/math-round.js test/mjsunit/math-sqrt.js test/mjsunit/megamorphic-callbacks.js test/mjsunit/mirror-array.js test/mjsunit/mirror-boolean.js test/mjsunit/mirror-date.js test/mjsunit/mirror-error.js test/mjsunit/mirror-function.js test/mjsunit/mirror-null.js test/mjsunit/mirror-number.js test/mjsunit/mirror-object.js test/mjsunit/mirror-regexp.js test/mjsunit/mirror-script.js test/mjsunit/mirror-string.js test/mjsunit/mirror-undefined.js test/mjsunit/mirror-unresolved-function.js test/mjsunit/mjsunit.js test/mjsunit/mjsunit.status test/mjsunit/mod.js test/mjsunit/mul-exhaustive-part1.js test/mjsunit/mul-exhaustive-part10.js test/mjsunit/mul-exhaustive-part2.js test/mjsunit/mul-exhaustive-part3.js test/mjsunit/mul-exhaustive-part4.js test/mjsunit/mul-exhaustive-part5.js test/mjsunit/mul-exhaustive-part6.js test/mjsunit/mul-exhaustive-part7.js test/mjsunit/mul-exhaustive-part8.js test/mjsunit/mul-exhaustive-part9.js test/mjsunit/multiline.js test/mjsunit/multiple-return.js test/mjsunit/negate-zero.js test/mjsunit/negate.js test/mjsunit/new-function.js test/mjsunit/new.js test/mjsunit/newline-in-string.js test/mjsunit/no-branch-elimination.js test/mjsunit/no-octal-constants-above-256.js test/mjsunit/no-semicolon.js test/mjsunit/non-ascii-replace.js test/mjsunit/not.js test/mjsunit/nul-characters.js test/mjsunit/number-is.js test/mjsunit/number-limits.js test/mjsunit/number-string-index-call.js test/mjsunit/number-tostring-small.js test/mjsunit/number-tostring.js test/mjsunit/numops-fuzz-part1.js test/mjsunit/numops-fuzz-part2.js test/mjsunit/numops-fuzz-part3.js test/mjsunit/numops-fuzz-part4.js test/mjsunit/obj-construct.js test/mjsunit/object-create.js test/mjsunit/object-define-properties.js test/mjsunit/object-define-property.js test/mjsunit/object-freeze.js test/mjsunit/object-get-own-property-names.js test/mjsunit/object-is.js test/mjsunit/object-literal-conversions.js test/mjsunit/object-literal-gc.js test/mjsunit/object-literal-overwrite.js test/mjsunit/object-literal.js test/mjsunit/object-prevent-extensions.js test/mjsunit/object-seal.js test/mjsunit/object-toprimitive.js test/mjsunit/optimized-typeof.js test/mjsunit/override-read-only-property.js test/mjsunit/packed-elements.js test/mjsunit/parse-int-float.js test/mjsunit/pixel-array-rounding.js test/mjsunit/polymorph-arrays.js test/mjsunit/property-load-across-eval.js test/mjsunit/property-object-key.js test/mjsunit/proto.js test/mjsunit/prototype.js test/mjsunit/readonly.js test/mjsunit/receiver-in-with-calls.js test/mjsunit/regexp-UC16.js test/mjsunit/regexp-cache-replace.js test/mjsunit/regexp-call-as-function.js test/mjsunit/regexp-capture-3.js test/mjsunit/regexp-capture.js test/mjsunit/regexp-captures.js test/mjsunit/regexp-compile.js test/mjsunit/regexp-global.js test/mjsunit/regexp-indexof.js test/mjsunit/regexp-lookahead.js test/mjsunit/regexp-loop-capture.js 
test/mjsunit/regexp-multiline-stack-trace.js test/mjsunit/regexp-multiline.js test/mjsunit/regexp-results-cache.js test/mjsunit/regexp-standalones.js test/mjsunit/regexp-static.js test/mjsunit/regexp-string-methods.js test/mjsunit/regexp.js test/mjsunit/regress/bitops-register-alias.js test/mjsunit/regress/regress-100409.js test/mjsunit/regress/regress-100702.js test/mjsunit/regress/regress-1015.js test/mjsunit/regress/regress-1017.js test/mjsunit/regress/regress-1020.js test/mjsunit/regress/regress-102153.js test/mjsunit/regress/regress-1030466.js test/mjsunit/regress/regress-103259.js test/mjsunit/regress/regress-1036894.js test/mjsunit/regress/regress-1039610.js test/mjsunit/regress/regress-1050043.js test/mjsunit/regress/regress-1060.js test/mjsunit/regress/regress-1062422.js test/mjsunit/regress/regress-1066899.js test/mjsunit/regress/regress-1079.js test/mjsunit/regress/regress-1081309.js test/mjsunit/regress/regress-108296.js test/mjsunit/regress/regress-1083.js test/mjsunit/regress/regress-109195.js test/mjsunit/regress/regress-1092.js test/mjsunit/regress/regress-1099.js test/mjsunit/regress/regress-1102760.js test/mjsunit/regress/regress-1103.js test/mjsunit/regress/regress-1104.js test/mjsunit/regress/regress-110509.js test/mjsunit/regress/regress-1106.js test/mjsunit/regress/regress-1107.js test/mjsunit/regress/regress-1110.js test/mjsunit/regress/regress-1110164.js test/mjsunit/regress/regress-1112.js test/mjsunit/regress/regress-1112051.js test/mjsunit/regress/regress-1114040.js test/mjsunit/regress/regress-1117.js test/mjsunit/regress/regress-1118.js test/mjsunit/regress/regress-1119.js test/mjsunit/regress/regress-1120.js test/mjsunit/regress/regress-1121.js test/mjsunit/regress/regress-1122.js test/mjsunit/regress/regress-1125.js test/mjsunit/regress/regress-1126.js test/mjsunit/regress/regress-1129.js test/mjsunit/regress/regress-1130.js test/mjsunit/regress/regress-1131.js test/mjsunit/regress/regress-1132.js test/mjsunit/regress/regress-1134697.js test/mjsunit/regress/regress-113924.js test/mjsunit/regress/regress-114.js test/mjsunit/regress/regress-1145.js test/mjsunit/regress/regress-1146.js test/mjsunit/regress/regress-1149.js test/mjsunit/regress/regress-1150.js test/mjsunit/regress/regress-1151.js test/mjsunit/regress/regress-115100.js test/mjsunit/regress/regress-115452.js test/mjsunit/regress/regress-1156.js test/mjsunit/regress/regress-116.js test/mjsunit/regress/regress-1160.js test/mjsunit/regress/regress-1166.js test/mjsunit/regress/regress-1167.js test/mjsunit/regress/regress-1170.js test/mjsunit/regress/regress-1170187.js test/mjsunit/regress/regress-1172-bis.js test/mjsunit/regress/regress-1172.js test/mjsunit/regress/regress-1173979.js test/mjsunit/regress/regress-1174.js test/mjsunit/regress/regress-117409.js test/mjsunit/regress/regress-1175390.js test/mjsunit/regress/regress-1176.js test/mjsunit/regress/regress-1177518.js test/mjsunit/regress/regress-1177809.js test/mjsunit/regress/regress-117794.js test/mjsunit/regress/regress-1178598.js test/mjsunit/regress/regress-1181.js test/mjsunit/regress/regress-1182832.js test/mjsunit/regress/regress-1184.js test/mjsunit/regress/regress-1187524.js test/mjsunit/regress/regress-119429.js test/mjsunit/regress/regress-119609.js test/mjsunit/regress/regress-119925.js test/mjsunit/regress/regress-1199401.js test/mjsunit/regress/regress-1199637.js test/mjsunit/regress/regress-1200351.js test/mjsunit/regress/regress-120099.js test/mjsunit/regress/regress-1201933.js test/mjsunit/regress/regress-1203459.js 
test/mjsunit/regress/regress-1207.js test/mjsunit/regress/regress-1207276.js test/mjsunit/regress/regress-1209.js test/mjsunit/regress/regress-1210.js test/mjsunit/regress/regress-1213.js test/mjsunit/regress/regress-1213516.js test/mjsunit/regress/regress-1213575.js test/mjsunit/regress/regress-121407.js test/mjsunit/regress/regress-1215.js test/mjsunit/regress/regress-1215653.js test/mjsunit/regress/regress-1217.js test/mjsunit/regress/regress-1218.js test/mjsunit/regress/regress-1229.js test/mjsunit/regress/regress-1233.js test/mjsunit/regress/regress-123512.js test/mjsunit/regress/regress-1236.js test/mjsunit/regress/regress-1237.js test/mjsunit/regress/regress-123919.js test/mjsunit/regress/regress-124.js test/mjsunit/regress/regress-1240.js test/mjsunit/regress/regress-124594.js test/mjsunit/regress/regress-1246.js test/mjsunit/regress/regress-1254366.js test/mjsunit/regress/regress-125515.js test/mjsunit/regress/regress-1257.js test/mjsunit/regress/regress-126412.js test/mjsunit/regress/regress-1278.js test/mjsunit/regress/regress-128018.js test/mjsunit/regress/regress-128146.js test/mjsunit/regress/regress-1309.js test/mjsunit/regress/regress-131923.js test/mjsunit/regress/regress-131994.js test/mjsunit/regress/regress-1323.js test/mjsunit/regress/regress-1327557.js test/mjsunit/regress/regress-133211.js test/mjsunit/regress/regress-133211b.js test/mjsunit/regress/regress-1337.js test/mjsunit/regress/regress-1346700.js test/mjsunit/regress/regress-1351.js test/mjsunit/regress/regress-1355.js test/mjsunit/regress/regress-1360.js test/mjsunit/regress/regress-136048.js test/mjsunit/regress/regress-1365.js test/mjsunit/regress/regress-1369.js test/mjsunit/regress/regress-137.js test/mjsunit/regress/regress-137768.js test/mjsunit/regress/regress-1383.js test/mjsunit/regress/regress-1387.js test/mjsunit/regress/regress-1389.js test/mjsunit/regress/regress-1401.js test/mjsunit/regress/regress-1403.js test/mjsunit/regress/regress-1412.js test/mjsunit/regress/regress-1415.js test/mjsunit/regress/regress-1419.js test/mjsunit/regress/regress-1423.js test/mjsunit/regress/regress-1434.js test/mjsunit/regress/regress-1436.js test/mjsunit/regress/regress-1439135.js test/mjsunit/regress/regress-143967.js test/mjsunit/regress/regress-1447.js test/mjsunit/regress/regress-145201.js test/mjsunit/regress/regress-1472.js test/mjsunit/regress/regress-1476.js test/mjsunit/regress/regress-148378.js test/mjsunit/regress/regress-149.js test/mjsunit/regress/regress-1491.js test/mjsunit/regress/regress-1493017.js test/mjsunit/regress/regress-1513.js test/mjsunit/regress/regress-1521.js test/mjsunit/regress/regress-1523.js test/mjsunit/regress/regress-1528.js test/mjsunit/regress/regress-1529.js test/mjsunit/regress/regress-1530.js test/mjsunit/regress/regress-1531.js test/mjsunit/regress/regress-1546.js test/mjsunit/regress/regress-1548.js test/mjsunit/regress/regress-155924.js test/mjsunit/regress/regress-1560.js test/mjsunit/regress/regress-1563.js test/mjsunit/regress/regress-1582.js test/mjsunit/regress/regress-1583.js test/mjsunit/regress/regress-1586.js test/mjsunit/regress/regress-1591.js test/mjsunit/regress/regress-1592.js test/mjsunit/regress/regress-1620.js test/mjsunit/regress/regress-1624-strict.js test/mjsunit/regress/regress-1624.js test/mjsunit/regress/regress-1625.js test/mjsunit/regress/regress-1639-2.js test/mjsunit/regress/regress-1639.js test/mjsunit/regress/regress-1647.js test/mjsunit/regress/regress-1650.js test/mjsunit/regress/regress-1692.js test/mjsunit/regress/regress-1708.js 
test/mjsunit/regress/regress-171.js test/mjsunit/regress/regress-1711.js test/mjsunit/regress/regress-1713.js test/mjsunit/regress/regress-1748.js test/mjsunit/regress/regress-1757.js test/mjsunit/regress/regress-176.js test/mjsunit/regress/regress-1790.js test/mjsunit/regress/regress-1849.js test/mjsunit/regress/regress-1853.js test/mjsunit/regress/regress-186.js test/mjsunit/regress/regress-187.js test/mjsunit/regress/regress-1878.js test/mjsunit/regress/regress-189.js test/mjsunit/regress/regress-1898.js test/mjsunit/regress/regress-191.js test/mjsunit/regress/regress-1919169.js test/mjsunit/regress/regress-192.js test/mjsunit/regress/regress-1924.js test/mjsunit/regress/regress-193.js test/mjsunit/regress/regress-1945.js test/mjsunit/regress/regress-1973.js test/mjsunit/regress/regress-1980.js test/mjsunit/regress/regress-20070207.js test/mjsunit/regress/regress-201.js test/mjsunit/regress/regress-2027.js test/mjsunit/regress/regress-2030.js test/mjsunit/regress/regress-2032.js test/mjsunit/regress/regress-2034.js test/mjsunit/regress/regress-2045.js test/mjsunit/regress/regress-2054.js test/mjsunit/regress/regress-2055.js test/mjsunit/regress/regress-2056.js test/mjsunit/regress/regress-2058.js test/mjsunit/regress/regress-2071.js test/mjsunit/regress/regress-2110.js test/mjsunit/regress/regress-2119.js test/mjsunit/regress/regress-2153.js test/mjsunit/regress/regress-2156.js test/mjsunit/regress/regress-2163.js test/mjsunit/regress/regress-2170.js test/mjsunit/regress/regress-2172.js test/mjsunit/regress/regress-2185-2.js test/mjsunit/regress/regress-2185.js test/mjsunit/regress/regress-2186.js test/mjsunit/regress/regress-219.js test/mjsunit/regress/regress-2193.js test/mjsunit/regress/regress-220.js test/mjsunit/regress/regress-2219.js test/mjsunit/regress/regress-2225.js test/mjsunit/regress/regress-2226.js test/mjsunit/regress/regress-2234.js test/mjsunit/regress/regress-2249.js test/mjsunit/regress/regress-2249423.js test/mjsunit/regress/regress-225.js test/mjsunit/regress/regress-2250.js test/mjsunit/regress/regress-2261.js test/mjsunit/regress/regress-227.js test/mjsunit/regress/regress-2284.js test/mjsunit/regress/regress-2285.js test/mjsunit/regress/regress-2286.js test/mjsunit/regress/regress-2289.js test/mjsunit/regress/regress-2291.js test/mjsunit/regress/regress-2294.js test/mjsunit/regress/regress-2296.js test/mjsunit/regress/regress-231.js test/mjsunit/regress/regress-2315.js test/mjsunit/regress/regress-2318.js test/mjsunit/regress/regress-2322.js test/mjsunit/regress/regress-2326.js test/mjsunit/regress/regress-233.js test/mjsunit/regress/regress-2336.js test/mjsunit/regress/regress-2339.js test/mjsunit/regress/regress-2346.js test/mjsunit/regress/regress-2373.js test/mjsunit/regress/regress-2374.js test/mjsunit/regress/regress-244.js test/mjsunit/regress/regress-246.js test/mjsunit/regress/regress-2489.js test/mjsunit/regress/regress-253.js test/mjsunit/regress/regress-254.js test/mjsunit/regress/regress-259.js test/mjsunit/regress/regress-260.js test/mjsunit/regress/regress-263.js test/mjsunit/regress/regress-265.js test/mjsunit/regress/regress-267.js test/mjsunit/regress/regress-269.js test/mjsunit/regress/regress-279.js test/mjsunit/regress/regress-284.js test/mjsunit/regress/regress-286.js test/mjsunit/regress/regress-294.js test/mjsunit/regress/regress-3006390.js test/mjsunit/regress/regress-312.js test/mjsunit/regress/regress-317.js test/mjsunit/regress/regress-318.js test/mjsunit/regress/regress-3185905.js test/mjsunit/regress/regress-3199913.js 
test/mjsunit/regress/regress-3218530.js test/mjsunit/regress/regress-3218915.js test/mjsunit/regress/regress-3230771.js test/mjsunit/regress/regress-3247124.js test/mjsunit/regress/regress-3252443.js test/mjsunit/regress/regress-326.js test/mjsunit/regress/regress-334.js test/mjsunit/regress/regress-3408144.js test/mjsunit/regress/regress-341.js test/mjsunit/regress/regress-345.js test/mjsunit/regress/regress-349.js test/mjsunit/regress/regress-35.js test/mjsunit/regress/regress-351.js test/mjsunit/regress/regress-386.js test/mjsunit/regress/regress-392.js test/mjsunit/regress/regress-394.js test/mjsunit/regress/regress-396.js test/mjsunit/regress/regress-397.js test/mjsunit/regress/regress-399.js test/mjsunit/regress/regress-406.js test/mjsunit/regress/regress-416.js test/mjsunit/regress/regress-45469.js test/mjsunit/regress/regress-475.js test/mjsunit/regress/regress-483.js test/mjsunit/regress/regress-485.js test/mjsunit/regress/regress-486.js test/mjsunit/regress/regress-490.js test/mjsunit/regress/regress-491.js test/mjsunit/regress/regress-492.js test/mjsunit/regress/regress-496.js test/mjsunit/regress/regress-502.js test/mjsunit/regress/regress-503.js test/mjsunit/regress/regress-515.js test/mjsunit/regress/regress-524.js test/mjsunit/regress/regress-526.js test/mjsunit/regress/regress-52801.js test/mjsunit/regress/regress-540.js test/mjsunit/regress/regress-545.js test/mjsunit/regress/regress-57.js test/mjsunit/regress/regress-580.js test/mjsunit/regress/regress-58740.js test/mjsunit/regress/regress-588599.js test/mjsunit/regress/regress-6-9-regexp.js test/mjsunit/regress/regress-603.js test/mjsunit/regress/regress-612.js test/mjsunit/regress/regress-619.js test/mjsunit/regress/regress-634.js test/mjsunit/regress/regress-636.js test/mjsunit/regress/regress-641.js test/mjsunit/regress/regress-643.js test/mjsunit/regress/regress-646.js test/mjsunit/regress/regress-662254.js test/mjsunit/regress/regress-666721.js test/mjsunit/regress/regress-667061.js test/mjsunit/regress/regress-670147.js test/mjsunit/regress/regress-674753.js test/mjsunit/regress/regress-675.js test/mjsunit/regress/regress-676025.js test/mjsunit/regress/regress-678525.js test/mjsunit/regress/regress-681.js test/mjsunit/regress/regress-682649.js test/mjsunit/regress/regress-685.js test/mjsunit/regress/regress-687.js test/mjsunit/regress/regress-69.js test/mjsunit/regress/regress-696.js test/mjsunit/regress/regress-697.js test/mjsunit/regress/regress-70066.js test/mjsunit/regress/regress-712.js test/mjsunit/regress/regress-71647.js test/mjsunit/regress/regress-720.js test/mjsunit/regress/regress-728.js test/mjsunit/regress/regress-732.js test/mjsunit/regress/regress-734862.js test/mjsunit/regress/regress-737588.js test/mjsunit/regress/regress-74.js test/mjsunit/regress/regress-747.js test/mjsunit/regress/regress-752.js test/mjsunit/regress/regress-753.js test/mjsunit/regress/regress-754.js test/mjsunit/regress/regress-760-1.js test/mjsunit/regress/regress-760-2.js test/mjsunit/regress/regress-780423.js test/mjsunit/regress/regress-78270.js test/mjsunit/regress/regress-784.js test/mjsunit/regress/regress-794.js test/mjsunit/regress/regress-798.js test/mjsunit/regress/regress-799761.js test/mjsunit/regress/regress-806.js test/mjsunit/regress/regress-806473.js test/mjsunit/regress/regress-815.js test/mjsunit/regress/regress-82769.js test/mjsunit/regress/regress-842.js test/mjsunit/regress/regress-842017.js test/mjsunit/regress/regress-84234.js test/mjsunit/regress/regress-851.js test/mjsunit/regress/regress-85177.js 
test/mjsunit/regress/regress-857.js test/mjsunit/regress/regress-86.js test/mjsunit/regress/regress-87.js test/mjsunit/regress/regress-874.js test/mjsunit/regress/regress-874178.js test/mjsunit/regress/regress-875031.js test/mjsunit/regress/regress-877615.js test/mjsunit/regress/regress-88591.js test/mjsunit/regress/regress-88858.js test/mjsunit/regress/regress-892742.js test/mjsunit/regress/regress-900.js test/mjsunit/regress/regress-900055.js test/mjsunit/regress/regress-900966.js test/mjsunit/regress/regress-91.js test/mjsunit/regress/regress-91008.js test/mjsunit/regress/regress-91010.js test/mjsunit/regress/regress-91013.js test/mjsunit/regress/regress-91120.js test/mjsunit/regress/regress-91787.js test/mjsunit/regress/regress-918.js test/mjsunit/regress/regress-925537.js test/mjsunit/regress/regress-927.js test/mjsunit/regress/regress-931.js test/mjsunit/regress/regress-937896.js test/mjsunit/regress/regress-944.js test/mjsunit/regress/regress-94425.js test/mjsunit/regress/regress-94873.js test/mjsunit/regress/regress-95113.js test/mjsunit/regress/regress-95485.js test/mjsunit/regress/regress-955.js test/mjsunit/regress/regress-95920.js test/mjsunit/regress/regress-962.js test/mjsunit/regress/regress-96523.js test/mjsunit/regress/regress-969.js test/mjsunit/regress/regress-97116.js test/mjsunit/regress/regress-974.js test/mjsunit/regress/regress-982.js test/mjsunit/regress/regress-98773.js test/mjsunit/regress/regress-990205.js test/mjsunit/regress/regress-99167.js test/mjsunit/regress/regress-992.js test/mjsunit/regress/regress-992733.js test/mjsunit/regress/regress-995.js test/mjsunit/regress/regress-996542.js test/mjsunit/regress/regress-998565.js test/mjsunit/regress/regress-arguments-gc.js test/mjsunit/regress/regress-bind-receiver.js test/mjsunit/regress/regress-builtin-array-op.js test/mjsunit/regress/regress-cnlt-elements.js test/mjsunit/regress/regress-cnlt-enum-indices.js test/mjsunit/regress/regress-cntl-descriptors-enum.js test/mjsunit/regress/regress-conditional-position.js test/mjsunit/regress/regress-convert-enum.js test/mjsunit/regress/regress-convert-enum2.js test/mjsunit/regress/regress-convert-transition.js test/mjsunit/regress/regress-crbug-100859.js test/mjsunit/regress/regress-crbug-107996.js test/mjsunit/regress/regress-crbug-119926.js test/mjsunit/regress/regress-crbug-122271.js test/mjsunit/regress/regress-crbug-125148.js test/mjsunit/regress/regress-crbug-126414.js test/mjsunit/regress/regress-crbug-134055.js test/mjsunit/regress/regress-crbug-134609.js test/mjsunit/regress/regress-crbug-135008.js test/mjsunit/regress/regress-crbug-135066.js test/mjsunit/regress/regress-crbug-137689.js test/mjsunit/regress/regress-crbug-138887.js test/mjsunit/regress/regress-crbug-140083.js test/mjsunit/regress/regress-crbug-142087.js test/mjsunit/regress/regress-crbug-142218.js test/mjsunit/regress/regress-crbug-145961.js test/mjsunit/regress/regress-crbug-146910.js test/mjsunit/regress/regress-crbug-147475.js test/mjsunit/regress/regress-crbug-148376.js test/mjsunit/regress/regress-crbug-150545.js test/mjsunit/regress/regress-crbug-150729.js test/mjsunit/regress/regress-crbug-157019.js test/mjsunit/regress/regress-crbug-157520.js test/mjsunit/regress/regress-crbug-158185.js test/mjsunit/regress/regress-crbug-18639.js test/mjsunit/regress/regress-crbug-3184.js test/mjsunit/regress/regress-crbug-37853.js test/mjsunit/regress/regress-crbug-3867.js test/mjsunit/regress/regress-crbug-39160.js test/mjsunit/regress/regress-crbug-40931.js 
test/mjsunit/regress/regress-crbug-72736.js test/mjsunit/regress/regress-crbug-84186.js test/mjsunit/regress/regress-crbug-87478.js test/mjsunit/regress/regress-create-exception.js test/mjsunit/regress/regress-debug-code-recompilation.js test/mjsunit/regress/regress-deep-proto.js test/mjsunit/regress/regress-deopt-gc.js test/mjsunit/regress/regress-fast-literal-transition.js test/mjsunit/regress/regress-fundecl.js test/mjsunit/regress/regress-inlining-function-literal-context.js test/mjsunit/regress/regress-iteration-order.js test/mjsunit/regress/regress-lazy-deopt-reloc.js test/mjsunit/regress/regress-load-elements.js test/mjsunit/regress/regress-push-args-twice.js test/mjsunit/regress/regress-r3391.js test/mjsunit/regress/regress-r4998.js test/mjsunit/regress/regress-regexp-codeflush.js test/mjsunit/regress/regress-smi-only-concat.js test/mjsunit/regress/regress-sqrt.js test/mjsunit/regress/regress-swapelements.js test/mjsunit/regress/regress-transcendental.js test/mjsunit/regress/regress-undefined-store-keyed-fast-element.js test/mjsunit/regress/short-circuit.js test/mjsunit/regress/splice-missing-wb.js test/mjsunit/samevalue.js test/mjsunit/scanner.js test/mjsunit/scope-calls-eval.js test/mjsunit/search-string-multiple.js test/mjsunit/setter-on-constructor-prototype.js test/mjsunit/shifts.js test/mjsunit/short-circuit-boolean.js test/mjsunit/simple-constructor.js test/mjsunit/sin-cos.js test/mjsunit/smi-negative-zero.js test/mjsunit/smi-ops-inlined.js test/mjsunit/smi-ops.js test/mjsunit/sparse-array-reverse.js test/mjsunit/sparse-array.js test/mjsunit/stack-traces-2.js test/mjsunit/stack-traces.js test/mjsunit/store-dictionary.js test/mjsunit/str-to-num.js test/mjsunit/stress-array-push.js test/mjsunit/strict-equals.js test/mjsunit/strict-mode-eval.js test/mjsunit/strict-mode-implicit-receiver.js test/mjsunit/strict-mode-opt.js test/mjsunit/strict-mode.js test/mjsunit/string-add.js test/mjsunit/string-case.js test/mjsunit/string-charat.js test/mjsunit/string-charcodeat.js test/mjsunit/string-compare-alignment.js test/mjsunit/string-external-cached.js test/mjsunit/string-externalize.js test/mjsunit/string-flatten.js test/mjsunit/string-fromcharcode.js test/mjsunit/string-index.js test/mjsunit/string-indexof-1.js test/mjsunit/string-indexof-2.js test/mjsunit/string-lastindexof.js test/mjsunit/string-localecompare.js test/mjsunit/string-match.js test/mjsunit/string-replace-gc.js test/mjsunit/string-replace-one-char.js test/mjsunit/string-replace-with-empty.js test/mjsunit/string-replace.js test/mjsunit/string-search.js test/mjsunit/string-slices-regexp.js test/mjsunit/string-slices.js test/mjsunit/string-split-cache.js test/mjsunit/string-split.js test/mjsunit/substr.js test/mjsunit/sum-0-plus-undefined-is-NaN.js test/mjsunit/switch.js test/mjsunit/testcfg.py test/mjsunit/this-in-callbacks.js test/mjsunit/this-property-assignment.js test/mjsunit/this.js test/mjsunit/throw-and-catch-function.js test/mjsunit/throw-exception-for-null-access.js test/mjsunit/to-precision.js test/mjsunit/to_number_order.js test/mjsunit/tobool.js test/mjsunit/toint32.js test/mjsunit/tools/codemap.js test/mjsunit/tools/consarray.js test/mjsunit/tools/csvparser.js test/mjsunit/tools/profile.js test/mjsunit/tools/profile_view.js test/mjsunit/tools/splaytree.js test/mjsunit/tools/tickprocessor.js test/mjsunit/top-level-assignments.js test/mjsunit/touint32.js test/mjsunit/transcendentals.js test/mjsunit/try-catch-extension-object.js test/mjsunit/try-catch-scopes.js test/mjsunit/try-finally-continue.js 
test/mjsunit/try-finally-nested.js test/mjsunit/try.js test/mjsunit/typed-array-slice.js test/mjsunit/typeof.js test/mjsunit/unbox-double-arrays.js test/mjsunit/undeletable-functions.js test/mjsunit/unicode-case-overoptimization.js test/mjsunit/unicode-string-to-number.js test/mjsunit/unicodelctest-no-optimization.js test/mjsunit/unicodelctest.js test/mjsunit/unusual-constructor.js test/mjsunit/uri.js test/mjsunit/value-callic-prototype-change.js test/mjsunit/value-of.js test/mjsunit/value-wrapper.js test/mjsunit/var.js test/mjsunit/with-function-expression.js test/mjsunit/with-leave.js test/mjsunit/with-parameter-access.js test/mjsunit/with-prototype.js test/mjsunit/with-readonly.js test/mjsunit/with-value.js test/mozilla/mozilla-shell-emulation.js test/mozilla/mozilla.status test/mozilla/testcfg.py test/preparser/duplicate-parameter.pyt test/preparser/duplicate-property.pyt test/preparser/empty.js test/preparser/functions-only.js test/preparser/non-alphanum.js test/preparser/non-use-strict-hex-escape.js test/preparser/non-use-strict-octal-escape.js test/preparser/non-use-strict-uhex-escape.js test/preparser/nonstrict-arguments.js test/preparser/nonstrict-eval.js test/preparser/nonstrict-with.js test/preparser/preparser.status test/preparser/strict-const.js test/preparser/strict-function-statement.pyt test/preparser/strict-identifiers.pyt test/preparser/strict-octal-indirect-regexp.js test/preparser/strict-octal-number.js test/preparser/strict-octal-regexp.js test/preparser/strict-octal-string.js test/preparser/strict-octal-use-strict-after.js test/preparser/strict-octal-use-strict-before.js test/preparser/strict-with.js test/preparser/symbols-only.js test/preparser/testcfg.py test/sputnik/sputnik.status test/sputnik/testcfg.py test/test262/harness-adapt.js test/test262/test262.status test/test262/testcfg.py tools/android-ll-prof.sh tools/android-run.py tools/android-sync.sh tools/bash-completion.sh tools/check-static-initializers.sh tools/codemap.js tools/common-includes.sh tools/consarray.js tools/csvparser.js tools/disasm.py tools/fuzz-harness.sh tools/gc-nvp-trace-processor.py tools/gcmole/Makefile tools/gcmole/gcmole.cc tools/gdb-v8-support.py tools/gen-postmortem-metadata.py tools/grokdump.py tools/gyp/v8.gyp tools/jsmin.py tools/ll_prof.py tools/logreader.js tools/merge-to-branch.sh tools/oom_dump/SConstruct tools/oom_dump/oom_dump.cc tools/process-heap-prof.py tools/profile.js tools/profile_view.js tools/push-to-trunk.sh tools/run-tests.py tools/run-valgrind.py tools/splaytree.js tools/stats-viewer.py tools/status-file-converter.py tools/test-server.py tools/test-wrapper-gypbuild.py tools/test.py tools/testrunner/__init__.py tools/testrunner/local/__init__.py tools/testrunner/local/commands.py tools/testrunner/local/execution.py tools/testrunner/local/old_statusfile.py tools/testrunner/local/progress.py tools/testrunner/local/statusfile.py tools/testrunner/local/testsuite.py tools/testrunner/local/utils.py tools/testrunner/local/verbose.py tools/testrunner/network/__init__.py tools/testrunner/network/distro.py tools/testrunner/network/endpoint.py tools/testrunner/network/network_execution.py tools/testrunner/network/perfdata.py tools/testrunner/objects/__init__.py tools/testrunner/objects/context.py tools/testrunner/objects/output.py tools/testrunner/objects/peer.py tools/testrunner/objects/testcase.py tools/testrunner/objects/workpacket.py tools/testrunner/server/__init__.py tools/testrunner/server/compression.py tools/testrunner/server/constants.py 
tools/testrunner/server/local_handler.py tools/testrunner/server/main.py tools/testrunner/server/presence_handler.py tools/testrunner/server/signatures.py tools/testrunner/server/status_handler.py tools/testrunner/server/work_handler.py tools/tickprocessor-driver.js tools/tickprocessor.js tools/utils.py
Copyright: 2006-2008, the V8 project authors. 2006-2009, the V8 project authors. 2006-2011, the V8 project authors. 2006-2012, the V8 project authors. 2007-2008, the V8 project authors. 2007-2010, the V8 project authors. 2007-2011, the V8 project authors. 2008, the V8 project authors. 2008-2009, the V8 project authors. 2009, the V8 project authors. 2009-2010, the V8 project authors. 2010, the V8 project authors. 2011, the V8 project authors. 2012, the V8 project authors. 2013, the V8 project authors. HOLDERS AND CONTRIBUTORS
License: BSD-3-clause FIXME

Files: AUTHORS ChangeLog DEPS OWNERS benchmarks/README.txt benchmarks/revisions.html benchmarks/run.html benchmarks/spinning-balls/index.html benchmarks/style.css build/README.txt debian/README.source debian/compat debian/control debian/control.in debian/control.in.in debian/gbp.conf debian/libv8-__API__-dev.install.in debian/patches/0001_kfreebsd.patch debian/patches/0008_mksnapshot_stdout.patch debian/patches/0011_use_system_gyp.patch debian/patches/0012_loongson_force_cache_flush.patch debian/patches/series debian/rules debian/source/format debian/watch debian/watch.in test/es5conform/README test/mjsunit/tools/tickprocessor-test-func-info.log test/mjsunit/tools/tickprocessor-test.default test/mjsunit/tools/tickprocessor-test.func-info test/mjsunit/tools/tickprocessor-test.gc-state test/mjsunit/tools/tickprocessor-test.ignore-unknown test/mjsunit/tools/tickprocessor-test.log test/mjsunit/tools/tickprocessor-test.separate-ic test/preparser/preparser.expectation test/sputnik/README test/test262/README tools/freebsd-tick-processor tools/gcmole/README tools/linux-tick-processor tools/mac-nm tools/mac-tick-processor tools/oom_dump/README tools/testrunner/README tools/testrunner/server/daemon.py tools/v8.xcodeproj/README.txt tools/visual_studio/README.txt tools/windows-tick-processor.bat
Copyright: *No copyright*
License: UNKNOWN FIXME

Files: build/all.gyp test/cctest/test-circular-queue.cc test/cctest/test-conversions.cc test/cctest/test-cpu-profiler.cc test/cctest/test-diy-fp.cc test/cctest/test-double.cc test/cctest/test-fast-dtoa.cc test/cctest/test-heap-profiler.cc test/cctest/test-heap.cc test/cctest/test-lock.cc test/cctest/test-log.cc test/cctest/test-platform-linux.cc test/cctest/test-platform-macos.cc test/cctest/test-platform-nullos.cc test/cctest/test-platform-tls.cc test/cctest/test-platform-win32.cc test/cctest/test-profile-generator.cc test/cctest/test-sockets.cc test/cctest/test-strings.cc test/cctest/test-strtod.cc test/cctest/test-unbound-queue.cc
Copyright: 2006-2008, the V8 project authors. 2006-2009, the V8 project authors. 2009, the V8 project authors. 2010, the V8 project authors. 2011, the V8 project authors. 2012, the V8 project authors.
License: UNKNOWN FIXME

Files: src/assembler.cc src/assembler.h src/ia32/assembler-ia32-inl.h src/ia32/assembler-ia32.h src/mips/assembler-mips-inl.h src/mips/assembler-mips.cc src/mips/assembler-mips.h src/x64/assembler-x64.h
Copyright: 1994-2006, Sun Microsystems Inc 2011, the V8 project authors. 2012, the V8 project authors.
HOLDERS AND CONTRIBUTORS "AS OWNER OR License: BSD-2-clause FIXME Files: test/mjsunit/third_party/array-isarray.js test/mjsunit/third_party/array-splice-webkit.js test/mjsunit/third_party/object-keys.js test/mjsunit/third_party/string-trim.js Copyright: 2006, Apple Computer, Inc. 2009, Apple Computer, Inc. HOLDERS AND CONTRIBUTORS OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT holder(s) nor the names of any License: BSD-2-clause FIXME Files: src/arm/assembler-arm-inl.h src/arm/assembler-arm.cc src/arm/assembler-arm.h src/ia32/assembler-ia32.cc Copyright: 1994-2006, Sun Microsystems Inc 2012, the V8 project authors. HOLDERS AND CONTRIBUTORS OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT License: BSD-2-clause FIXME Files: LICENSE.valgrind src/third_party/valgrind/valgrind.h Copyright: 2000-2007, Julian Seward. 2000-2010, Julian Seward. License: BSD-3-clause FIXME Files: tools/gcmole/gccause.lua tools/gcmole/gcmole.lua Copyright: 2011, the V8 project authors. HOLDERS AND CONTRIBUTORS License: UNKNOWN FIXME Files: LICENSE.strongtalk Copyright: 1994-2006, Sun Microsystems Inc HOLDERS AND CONTRIBUTORS "AS OWNER OR License: BSD-2-clause FIXME Files: tools/presubmit.py Copyright: License: BSD-3-clause FIXME Files: src/scanner.cc Copyright: && IsCarriageReturn(c0_)) Advance(); && IsLineFeed(c0_)) Advance(); 2011, the V8 project authors. HOLDERS AND CONTRIBUTORS License: BSD-3-clause FIXME Files: src/runtime.cc Copyright: * kJsonQuotesCharactersPerEntry; 2012, the V8 project authors. >= kQuoteTableLength) { HOLDERS AND CONTRIBUTORS || c == 0x200b || c == 0xfeff; License: BSD-3-clause FIXME Files: src/scanner.h Copyright: 2011, the V8 project authors. <= 5) return c + 10; <= 9) return c; HOLDERS AND CONTRIBUTORS License: BSD-3-clause FIXME Files: test/mjsunit/instanceof-2.js Copyright: 2010, the V8 project authors. Foo.prototype = 12; HOLDERS AND CONTRIBUTORS License: BSD-3-clause FIXME Files: test/mjsunit/third_party/regexp-pcre.js Copyright: 1997-2007, University of Cambridge 2007, Google Inc HOLDERS AND CONTRIBUTORS "AS IS" OWNER OR CONTRIBUTORS BE License: BSD-3-clause FIXME Files: tools/js2c.py Copyright: 2011, Google Inc. 2012, the V8 project authors. HOLDERS AND CONTRIBUTORS License: BSD-3-clause FIXME Files: src/hydrogen.cc Copyright: 2012, the V8 project authors. HOLDERS AND CONTRIBUTORS Reachable only normally License: BSD-3-clause FIXME Files: PRESUBMIT.py Copyright: 2012, the V8 project authors. HOLDERS AND CONTRIBUTORS header and trailing whitespaces check failed")) License: BSD-3-clause FIXME Files: src/char-predicates-inl.h Copyright: 2011, the V8 project authors. HOLDERS AND CONTRIBUTORS || IsInRange(AsciiAlphaToLower, 'a', 'f'); License: BSD-3-clause FIXME Files: benchmarks/navier-stokes.js Copyright: 2009, Oliver Hunt 2012, the V8 project authors. License: Expat FIXME Files: benchmarks/crypto.js Copyright: 2003-2005, Tom Wu 2005, Tom Wu License: Expat FIXME Files: benchmarks/deltablue.js Copyright: 1996, John Maloney and Mario Wolczko 2008, the V8 project authors. License: GPL-2+ FIXME Files: benchmarks/earley-boyer.js Copyright: != -1 || // ID-char { return c.val; } { return sc_SYMBOL_PREFIX + c.val; } || c === SC_EOF_OBJECT; || needing == "") License: UNKNOWN FIXME Files: tools/generate-ten-powers.scm Copyright: 2010, the V8 project authors. 
2010, the V8 project authors.") HOLDERS AND CONTRIBUTORS License: UNKNOWN FIXME Files: benchmarks/raytrace.js Copyright: 2005-2007, Sam Stephenson License: UNKNOWN FIXME debian/libv8-dev.install0000664000000000000000000000006012160515514012354 0ustar include usr debian/tmp/usr/lib/libv8.so usr/lib debian/control.in0000664000000000000000000000434112277012214011200 0ustar Source: libv8-3.14 Priority: optional Maintainer: Debian Javascript Maintainers Uploaders: Jérémy Lal , Jonas Smedegaard Build-Depends: @cdbs@, gyp Standards-Version: 3.9.3 Section: libs Homepage: http://code.google.com/p/v8/ Vcs-Browser: http://anonscm.debian.org/git/collab-maint/libv8.git Vcs-Git: git://anonscm.debian.org/collab-maint/libv8.git Package: libv8-dev Section: libdevel Architecture: i386 kfreebsd-i386 amd64 kfreebsd-amd64 armel armhf mipsel powerpc ppc64 ppc64el Depends: libv8-3.14.5 (= ${binary:Version}), ${misc:Depends} Conflicts: libv8-legacy-dev, libv8-3.14-dev Replaces: libv8-legacy-dev, libv8-3.14-dev Description: V8 JavaScript engine - development files for latest branch V8 is a high performance JavaScript engine written in C++. It is used in the web browser Chromium. . This package provide development headers for latest V8 branch. Package: libv8-3.14-dev Section: libdevel Architecture: i386 kfreebsd-i386 amd64 kfreebsd-amd64 armel armhf mipsel powerpc ppc64 ppc64el Depends: libv8-3.14.5 (= ${binary:Version}), ${misc:Depends} Provides: libv8-legacy-dev, libv8-dev Conflicts: libv8-dev Replaces: libv8-dev Description: V8 JavaScript engine - development files for 3.14 branch V8 is a high performance JavaScript engine written in C++. It is used in the web browser Chromium. . This package provide development headers for V8 3.14 branch. Package: libv8-3.14.5 Architecture: i386 kfreebsd-i386 amd64 kfreebsd-amd64 armel armhf mipsel powerpc ppc64 ppc64el Depends: ${shlibs:Depends}, ${misc:Depends} Description: V8 JavaScript engine - runtime library V8 is a high performance JavaScript engine written in C++. It is used in the web browser Chromium. . This package provides the dynamic library for V8. Package: libv8-3.14-dbg Priority: extra Section: debug Architecture: i386 kfreebsd-i386 amd64 kfreebsd-amd64 armel armhf mipsel powerpc ppc64 ppc64el Depends: libv8-3.14.5 (= ${binary:Version}), ${misc:Depends} Description: V8 JavaScript engine - debugging symbols V8 is a high performance JavaScript engine written in C++. It is used in the web browser Chromium. . This package provides the debugging symbols for the library. debian/copyright0000664000000000000000000003224712160515514011133 0ustar Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: V8 Upstream-Contact: the V8 Project Authors Source: http://code.google.com/p/v8 https://github.com/v8/v8 Files: * Copyright: 2006-2013 the V8 project authors License: BSD-3-clause~Google Files: src/arm/assembler-arm* src/assembler.* src/ia32/assembler-ia32* src/mips/assembler-mips* src/x64/assembler-x64.h Copyright: 1994-2006,2011, Sun Microsystems Inc. 2011-2012, the V8 Project Authors License: BSD-3-clause~SUN Files: test/mjsunit/third_party/* Copyright: 2006,2009, Apple Computer, Inc. 
License: BSD-3-clause Files: src/third_party/valgrind/valgrind.h Copyright: 2000-2010, Julian Seward License: BSD-4-clause Files: test/mjsunit/third_party/regexp-pcre.js Copyright: 1997-2007, University of Cambridge 2007, Google Inc License: BSD-3-clause~Cambridge Files: benchmarks/deltablue.js Copyright: 1996, John Maloney 1996, Mario Wolczko 2008, the V8 project authors License: GPL-2+ Files: benchmarks/crypto.js Copyright: 2003-2005, Tom Wu License: MIT~Wu Files: benchmarks/navier-stokes.js Copyright: 2009, Oliver Hunt 2012, the V8 project authors License: MIT Files: benchmarks/raytrace.js Copyright: 2005-2007, Sam Stephenson License: MIT~Prototype Comment: Prototype is freely distributable under the terms of an MIT-style license. For details, see the Prototype web site: http://prototype.conio.net/ Files: build/android.gypi Copyright: 2012, The Chromium Authors 2012, the V8 project authors License: BSD-3-clause~Google Files: tools/testrunner/server/daemon.py Copyright: none (Public Domain) License: public-domain This code has been written by Sander Marechal and published at: http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/ where the author has placed it in the public domain (see comment #6 at http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/#c6 ). Some minor modifications have been made by the V8 authors. The work remains in the public domain. Files: debian/* Copyright: 2008-2009, Fabien Tassin License: BSD-3-clause License: BSD-3-clause Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: . 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. . 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. . 3. Neither the name of the copyright holder(s) nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. . THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. License: BSD-3-clause~Google Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: . * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. 
nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. . THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. License: BSD-3-clause~SUN Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: . - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name of Sun Microsystems nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. . THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. License: BSD-3-clause~Cambridge Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: . * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. . * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. . * Neither the name of the University of Cambridge nor the name of Google Inc. nor the names of their contributors may be used to endorse or promote products derived from this software without specific prior written permission. . THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. License: BSD-4-clause Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: . 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. . 2. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. . 3. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. . 4. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. . THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. License: MIT Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: . The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. . THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. License: MIT~Wu Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: . 
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. . THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND, EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. . IN NO EVENT SHALL TOM WU BE LIABLE FOR ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. . In addition, the following condition applies: . All redistributions must retain an intact copy of this copyright notice and disclaimer. License: MIT~Prototype Permission to use, copy, modify, and distribute this software for any purpose without fee is hereby granted, provided that this entire notice is included in all copies of any software which is or includes a copy or modification of this software and in all copies of the supporting documentation for such software. . THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED WARRANTY. IN PARTICULAR, NEITHER THE AUTHOR NOR LUCENT MAKES ANY REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. License: GPL-2+ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. . This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Comment: On Debian systems the 'GNU General Public License' version 2 is located in '/usr/share/common-licenses/GPL-2'. . You should have received a copy of the 'GNU General Public License' along with this program. If not, see . debian/source/0000775000000000000000000000000012173070204010464 5ustar debian/source/format0000664000000000000000000000001412121727233011676 0ustar 3.0 (quilt) debian/changelog0000664000000000000000000005722112314735401011051 0ustar libv8-3.14 (3.14.5.8-5ubuntu2) trusty; urgency=medium * 0099_powerpc_support.patch: Refresh to alow's current libv8-3.14. -- Adam Conrad Wed, 26 Mar 2014 23:44:56 -0600 libv8-3.14 (3.14.5.8-5ubuntu1) trusty; urgency=medium * 0099_powerpc_support.patch: Pull in Andrew Low's powerpc port. * debian/rules: Enable powerpc/ppc64/ppc64el for package builds. -- Adam Conrad Wed, 12 Feb 2014 10:26:54 -0700 libv8-3.14 (3.14.5.8-5) unstable; urgency=medium * Import backport of upstream fix for CVE-2013-6639 and CVE-2013-6640. -- Jérémy Lal Fri, 20 Dec 2013 00:24:18 +0100 libv8-3.14 (3.14.5.8-4) unstable; urgency=low [ Jérémy Lal ] * Patch: 0013_gcc_48_compat, closes: bug#701312. * Allow Build-Dependency on latest libv8 branch version or on a specific branch: libv8--dev or libv8-dev. Ensure switching from one dev package to another works properly. Add a specific case for libv8-3.14 (<=3.14.5.8-3). [ Jonas Smedegaard ] * Tidy long descriptions to talk only about what packages provide, not the uses of it. -- Jérémy Lal Mon, 22 Jul 2013 10:30:56 +0200 libv8-3.14 (3.14.5.8-3) experimental; urgency=low * 0001_kfreebsd.patch: fix tests runner in previous broken patch. 
-- Jérémy Lal Sat, 04 May 2013 23:33:58 +0200 libv8-3.14 (3.14.5.8-2) experimental; urgency=low * Fix mipsel build (upstream follows same archs names as debian). * 0001_kfreebsd.patch: work around kfreebsd tests runner failure, "This platform lacks a functioning sem_open implementation". -- Jérémy Lal Wed, 01 May 2013 23:18:05 +0200 libv8-3.14 (3.14.5.8-1) experimental; urgency=low [ upstream ] * New release. [ Jérémy Lal ] * Update patches: + Update 0001_kfreebsd: Partially fixed upstream. + Remove 0013_performance_regression_fix: Applied upstream. + Unfuzz patches. * Update version numbers in watch file and package names/dependencies to reflect curent upstream version. * Build-depend unversioned on cdbs: Needed version satisfied in stable and oldstable no longer supported. * Append CPPFLAGS to CXXFLAGS and export LDFLAGS to let gyp catch hardening flags. Maybe fixes hardening (see bug#689754). * Fix remove all *.pyc files below tools dir. * Update copyright file: + Extend coverage for main copyright holder with current year. + Add tools/testrunner/server/daemon.py (in the Public Domain). [ Jonas Smedegaard ] * Explicitly mention Public Domain file as such in Copyright field. * Have git-import-orig suppress upstream .gitignore file. * Drop obsolete "DM-Upload-Allowed" control file field. * Update to use anonscm.debian.org URL for Vcs-Git field. * Track upstream source via github mirror (not limited-use googlecode.com SVN tags). * Include CDBS upstream-tarball snippet, and setup get-orig-source target. * Tidy README.source: + Drop obsolete sections. + Refer to common CDBS+git-buildpackage praxis. + Emphasize explicitly that NMUs can totally ignore control.in. * Tidy rules file: Resolve version strings in fewer shell calls. * Use versioned source package name, to allow multiple branches to coexist: Nodejs often needs more stable release than Chromium. * Git-ignore quilt .pc dir. -- Jonas Smedegaard Tue, 19 Mar 2013 03:20:03 +0100 libv8 (3.10.8.16-1) unstable; urgency=low * New upstream release. [ Jérémy Lal ] * Simplify SONAME naming to only first 3 segments of upstream version (i.e. skip patch level). Add chapter on reasoning to README.source. * Enable architectures kfreebsd-i386, kfreebsd-amd64 and mipsel. + Force mips3 (Loongson) support. + Set test timeout to 400 seconds for mipsel. Closes:bug#670836. Thanks to Steven Chamberlain. * Update patches: + Drop patches 0014 (disable cross-build) and issue9316131 (fix init array literals on ARM): Applied upstream. + Add patch 0001: Fix build flags for kfreebsd and unused-but-set-variable warning for gcc >= 4.6. Thanks to Steven Chamberlain. + Add patch 0012: Force cache flush on Loongson. Fixes mipsel tests. + Add patch 0013: Fix performance regression. + Refresh all remaining patches. * Add MIT license to copyright file for benchmarks/navier-stokes.js. * Autogenerate watch file, to track current branch (= major+minor of upstream version). [ Jonas Smedegaard ] * Update copyright file: + Bump file format to 1.0. + Fix double-indent in Copyright fields as per Policy §5.6.13. * Fix capitalize project name in copyright and long descriptions. -- Jonas Smedegaard Sat, 23 Jun 2012 13:07:52 +0200 libv8 (3.8.9.20-2) unstable; urgency=low * Cherry-picked four upstream patches from 3.8.9.29: + r11654.patch: fix CVE-2011-3111, closes:bug#687574. + r12161.patch: Fix ICs for slow objects with native accessor. + r12336.patch: Fix bug in compare IC. + r12460.patch: Fix some corner cases in skipping native methods using caller. Fix binding in new Function(). 
-- Jérémy Lal Sat, 29 Sep 2012 01:04:06 +0200 libv8 (3.8.9.20-1) unstable; urgency=low [ Jérémy Lal ] * Upstream update. * Remove mipsel from Architecture list. v8 3.8 branch only supports mips32r2 cpu variant. This excludes Loongson 2E (mips3). Closes:bug#667991. * Unset specific mipsel build flags set in 3.8.9.16-3, now unneeded. [ Jonas Smedegaard ] * Use anonscm.debian.org for Vcs-Browser field. * Define supported archs in rules file (auto-expanded in control file). -- Jonas Smedegaard Sat, 28 Apr 2012 00:02:42 +0200 libv8 (3.8.9.16-3) unstable; urgency=low * mipsel build fixes : + v8_use_mips_abi_hardfloat=false, this lowers EABI requirements. + v8_can_use_fpu_instructions=false, detect if FPU is present. + set -Wno-unused-but-set-variable only on mipsel. -- Jérémy Lal Sat, 07 Apr 2012 16:26:13 +0200 libv8 (3.8.9.16-2) unstable; urgency=low [ Jonas Smedegaard ] * Bump standards-version to 3.9.3. [ Jérémy Lal ] * Re-enable -Wno-unused-but-set-variable, fails on mipsel otherwise. * Upstream patch for failing armel smi arrays, closes:bug#666906. + debian/patches/issue9316131_5001.diff -- Jérémy Lal Sun, 01 Apr 2012 09:14:23 +0200 libv8 (3.8.9.16-1) unstable; urgency=low * New upstream release. [ Jérémy Lal ] * Remove patches: + 0005-enable-i18n-extension: Handled by libv8-i18n now. + 0015_enable_mips_tests.patch: applied upstream. * Stop build-depending on libicu-dev: Handled by libv8-i18n now. * Update copyright file: + Add Files paragraph covering build/android.gypi. -- Jonas Smedegaard Sat, 31 Mar 2012 17:53:29 +0200 libv8 (3.7.12.22-3) unstable; urgency=low * Fix mipsel build, allow test debug-step-3 to fail (non-crucial) -- Jérémy Lal Mon, 20 Feb 2012 14:08:17 +0100 libv8 (3.7.12.22-2) unstable; urgency=low * Adjust tests timeout from 120 to 180 seconds because build server can be busy sometimes. * Fix failing "make check" for mips arch in: + debian/patches/0015_enable_mips_tests.patch -- Jérémy Lal Sun, 19 Feb 2012 18:18:57 +0100 libv8 (3.7.12.22-1) unstable; urgency=low * New upstream release. [ Jérémy Lal ] * Enable mipsel arch. * Refresh patches. Remove unnecessary patches: + 0010_fix_arm_bug.patch (applied upstream). + 0012_make_check_testsuites.patch (uses TESTFLAGS, not TESTSUITES). + 0013_enable_soname.patch (applied upstream). + 0015_hash-collision-fix-v8-3.6.patch (applied upstream. * Fix tabs and whitespaces in debian/samples.gyp. * Stop setting v8_can_use_unaligned_accesses=false during build: works without it - and it even may be triggering an alignment bug. [ Jonas Smedegaard ] * Update copyright file: + Extend (and shorten) some copyright years. + Fix paths of a few files. + Rewrap comments at 72 chars. -- Jonas Smedegaard Mon, 13 Feb 2012 20:18:39 +0100 libv8 (3.6.6.14-2) unstable; urgency=low * Land hash collision fix for V8 3.6. Closes: bug#653962. This fixes CVE-2011-5037. * snapshot=off, because hash is randomized by a secret key that is otherwise readable in the snapshot. -- Jérémy Lal Sat, 07 Jan 2012 22:29:06 +0100 libv8 (3.6.6.14-1) unstable; urgency=low [ Jérémy Lal ] * Upstream update. * Refresh patches. Remove unused 0009_unaligned_access_armel.patch. * Add samples/*.cc, samples/*.js and provide usable samples.gyp to libv8-dev examples. Closes: bug#612362. * Update watch file: Track 3.6.x releases. [ Jonas Smedegaard ] * Newline-separate package relations in control file. -- Jonas Smedegaard Tue, 03 Jan 2012 22:00:29 +0100 libv8 (3.5.10.24-4) unstable; urgency=low * v8_use_arm_eabi_hardfloat must be passed in GYPFLAGS, this should fix armhf build. 
* Ubuntu vendor needs different armel architecture settings. -- Jérémy Lal Mon, 05 Dec 2011 11:36:38 +0100 libv8 (3.5.10.24-3) unstable; urgency=low * Fix build failures for arm: + arm_neon=0 for armhf + vfp3=off for armel Closes: bug#650548 -- Jérémy Lal Fri, 02 Dec 2011 10:17:48 +0100 libv8 (3.5.10.24-2) unstable; urgency=low * Set -Wno-unused-but-set-variable, i386 build fail otherwise. Closes: bug#650547 * Remove mipsel from architectures. Will be re-enabled when upstream really supports it. Closes: bug#650549 * Disable default arm flags (debian/0014_disable_armv7_defaults.patch) and set them properly for armel and armhf, using GYPFLAGS variable in debian/rules. Closes: bug#650548 * Remove 0009_unaligned_access_armel.patch, never proved it was needed. * Remove -fvisibility=hidden flag, applied upstream. * CCFLAGS are ignored by the build system, use CXXFLAGS instead. -- Jérémy Lal Thu, 01 Dec 2011 14:31:59 +0100 libv8 (3.5.10.24-1) unstable; urgency=low * New upstream release. [ Jérémy Lal ] * Build using gyp (instead of deprecated scons build): + removed scons patches + added gyp patches to achieve the same result. * Remove mipsel architecture, not yet supported upstream. * Tests are run using d8 instead of shell. * Allow parallel builds. * Update watch file: Track 3.5.x releases. -- Jonas Smedegaard Sat, 26 Nov 2011 00:34:40 +0700 libv8 (3.4.14.21-5) unstable; urgency=low [ Jakub Wilk ] * Add patch (taken from upstream VCS) to fix compatibility with ICU 4.8 Closes: bug#648506. Thanks to Julien Cristau for the bug report. -- Jérémy Lal Tue, 15 Nov 2011 23:07:20 +0100 libv8 (3.4.14.21-4) unstable; urgency=low * Fix armel tests. Closes: bug#644163. Thanks to Vyacheslav Egorov. -- Jérémy Lal Fri, 21 Oct 2011 16:24:05 +0200 libv8 (3.4.14.21-3) unstable; urgency=low * Disable VFP for armel * Disable unaligned access for armel -- Jérémy Lal Wed, 28 Sep 2011 23:14:48 +0200 libv8 (3.4.14.21-2) unstable; urgency=low [ Jonas Smedegaard ] * Rewrite short and long descriptions. Closes: bug#624374. Thanks to Erik Esterer. [ Jérémy Lal ] * Redirect output of mksnapshot to stdout. This helps armel debugging. * Backport of fix for upstream issue#1706: unaligned access on armel. -- Jérémy Lal Wed, 28 Sep 2011 11:25:48 +0200 libv8 (3.4.14.21-1) unstable; urgency=low [ Jérémy Lal ] * New upstream release. * Adds -mno-thumb-interwork on armel (armeabi:softfp). Upstream does not support thumb interworking < armv5t. However it is supported on armhf. * Move compiler flags for arm in debian/rules, instead of setting them using a quilt patch. * Relax build-dependencies on cdbs (unneededly tight) and debhelper (needed version satisfied even in oldstable). * Tests timeout after 120 seconds, instead of 60. Some builders can have slower machines. [ Jonas Smedegaard ] * Update watch file: Track 3.4.x releases. * Relax build-dependency on devscripts (needed version satisfied even in oldstable). * Update copyright file: + Fix use Source: (not Upstream-Source:). + Newline-delimit file list. + Use ~ as indicator for derived licenses. + Shorten license comment, and quote license name. -- Jonas Smedegaard Sat, 17 Sep 2011 16:55:28 +0200 libv8 (3.4.14.13-4) experimental; urgency=low * Export I18NExtension::get and I18NExtension::Register. Closes: bug#639596. -- Jérémy Lal Sun, 04 Sep 2011 15:41:48 +0200 libv8 (3.4.14.13-3) experimental; urgency=low * move I18NExtension from v8::internal to v8_i18n namespace, following the externalization of i18n extension in chromium 15. Closes: bug#639596. 
-- Jérémy Lal Sun, 04 Sep 2011 10:35:09 +0200 libv8 (3.4.14.13-2) experimental; urgency=low * fix include of "v8.h" in i18n-extension.h Closes: bug#639522. Thanks to Giuseppe Iuculano. -- Jérémy Lal Sun, 28 Aug 2011 00:15:25 +0200 libv8 (3.4.14.13-1) experimental; urgency=low * New upstream release. [ Jérémy Lal ] * Bump version in debian/control (and debian/control.in) [ Jonas Smedegaard ] * Add patch 0007 to fix armhf. Closes: bug#636370. Thanks to Konstantinos Margaritis. -- Jonas Smedegaard Wed, 24 Aug 2011 01:21:27 +0200 libv8 (3.4.14-1) experimental; urgency=low * New upstream release. [ Jérémy Lal ] * mipsel architecture added. * Fix FTBFS with gcc 4.6 using -Wnounused-but-set-variable. Closes: bug#625377. * Enable i18n experimental extension: reworked, could be broken. * Unfuzz patches. -- Jonas Smedegaard Sat, 23 Jul 2011 01:14:22 +0200 libv8 (3.1.8.22-1) unstable; urgency=low [ Jérémy Lal ] * Enable i18n experimental extension. Build-Depends on libicu-dev. Closes: bug#627066. Thanks to Giuseppe Iuculano. * Stop special-casing some regression tests on armel (introduced in 3.1.8.10-2): Solved by disabling cctests (since 3.1.8.10-3). [ Jonas Smedegaard ] * Unfuzz patches. -- Jonas Smedegaard Mon, 20 Jun 2011 22:24:02 +0200 libv8 (3.1.8.10-3) unstable; urgency=low * Disable cctests : they fail from time to time and are meant to be used for upstream development purposes. Only javascript tests are run. -- Jérémy Lal Sat, 23 Apr 2011 13:47:42 +0200 libv8 (3.1.8.10-2) unstable; urgency=low * Temporarily allow some tests to fail on armel builds. Closes: bug#623449. * Turn on verbose output when running tests. -- Jérémy Lal Fri, 22 Apr 2011 16:51:10 +0200 libv8 (3.1.8.10-1) unstable; urgency=low [ Jérémy Lal ] * New upstream release. [ Jonas Smedegaard ] * Bump policy compliance to standards-version 3.9.2. * Bump copyright file format to draft 174 of DEP-5. -- Jonas Smedegaard Sat, 16 Apr 2011 12:59:59 +0200 libv8 (2.5.9.21-1) unstable; urgency=low * New upstream release. [ Jérémy Lal ] * Relax regression checking to not cause build to fail. * Use DEB_SCONS_ENVVARS to define SONAME, removing the need for the soname patch, and allowing builds with gold linker. [ Jonas Smedegaard ] * Only relax regression checking when targeted experimental. * Let CDBS auto-resolve build-dependencies and library package name. * Fix tighten build-dependencies on cdbs and debhelper. * Build-depend on dh-buildinfo. -- Jonas Smedegaard Tue, 29 Mar 2011 12:41:52 +0200 libv8 (2.5.9.16-1) unstable; urgency=low * New upstream release. [ Jérémy Lal ] * Add architecture armhf. Closes: #596997 [ Jonas Smedegaard ] * Rewrite copyright file using draft svn166 of DEP5 format. * Supress copyright-checking a few binary files, to not upset dpkg-source. -- Jonas Smedegaard Thu, 24 Feb 2011 16:51:54 +0100 libv8 (2.5.9.15-2) unstable; urgency=low * Builds with snapshot=on. (Closes: #612781) -- Jérémy Lal Sat, 12 Feb 2011 02:27:40 +0100 libv8 (2.5.9.15-1) unstable; urgency=low * New upstream release. [ Jérémy Lal ] * Map DEB_HOST_ARCH and DEB_HOST_ARCH_OS to v8 scons options 'arch' and 'os'. (Closes: #611215) Move to team-maintainance, with me as maintainer and Jonas Smedegaard as uploader. Drop Antonio Radici as uploader: Thanks for your contributions, Antonio! [ Jonas Smedegaard ] * Tidy CDBS variables internally in rules file. -- Jonas Smedegaard Mon, 07 Feb 2011 01:36:39 +0100 libv8 (2.5.9.9-2) unstable; urgency=low * Include v8stdint.h in libv8-dev. 
-- Jérémy Lal Thu, 20 Jan 2011 23:26:46 +0100 libv8 (2.5.9.9-1) unstable; urgency=low * New upstream release. followed the procedure of README.source to update. * Fix FTBFS on armel. (Closes: #599713) * No longer flag march=armv4t, it's the default debian armel target. (Closes: #596997) * Fix typo in libv8-dbg short description. (Closes: #601855) -- Jérémy Lal Thu, 06 Jan 2011 18:02:20 +0100 libv8 (2.4.7-2) experimental; urgency=low * Fix FTBFS on armel with -Wno-psabi. For more info : http://gcc.gnu.org/bugzilla/show_bug.cgi?id=42748 * Fix FTBFS on mipsel, just remove it, it is not supported upstream, and anyway it won't compile in the 2.2 or 2.3 branches. * Respect policy 4.9.1 for DEB_BUILD_OPTIONS nocheck and parallel. * Remove unused code from debian/rules. -- Jérémy Lal Sat, 09 Oct 2010 21:26:14 +0200 libv8 (2.4.7-1) experimental; urgency=low * New upstream release, followed the procedure of README.source to update. * Removed patch issue3037008_6001.diff, applied upstream. * Removed part of 0002-armel-build.patch, applied upstream. * Target for amd64 is x64, not ia32. * Remove GCC_VERSION=44, no longer needed. * Runs cctest and mjsunit tests after build. -- Jérémy Lal Sat, 02 Oct 2010 12:01:53 +0200 libv8 (2.2.24-6) unstable; urgency=low * CVE-2010-3412 (Closes: #597856) Race condition in the console implementation in Google Chrome before 6.0.472.59 has unspecified impact and attack vectors. -- Jérémy Lal Fri, 24 Sep 2010 11:44:57 +0200 libv8 (2.2.24-5) unstable; urgency=low * Standards-Version 3.9.1. Nothing had to be changed to comply. * Fix chromium-browser: priority optional, depends on libv8 which has priority extra. (Closes: #591148) * Compile with GCC_VERSION=44. With that option, v8 pass all tests, and setting a breakpoint in chromium inspector does not crash. (Closes: #584562) -- Jérémy Lal Thu, 12 Aug 2010 23:39:43 +0200 libv8 (2.2.24-4) unstable; urgency=low * Replace arch: mips with mipsel (on the three packages) -- Jérémy Lal Sun, 25 Jul 2010 09:49:03 +0200 libv8 (2.2.24-3) unstable; urgency=low * Replace arch: mips with mipsel, because only little endian is supported. -- Jérémy Lal Sat, 24 Jul 2010 13:43:28 +0200 libv8 (2.2.24-2) unstable; urgency=low * Upstream patch : fix for...in loops for strings. http://codereview.chromium.org/3037008 -- Jérémy Lal Mon, 19 Jul 2010 14:55:22 +0200 libv8 (2.2.24-1) unstable; urgency=low * New upstream release, followed the procedure of README.source to update. * Standards-Version 3.9.0 * Build for armv4 instead of armv5, fix ARMV5 detection. (Closes: #589057) * Include v8-profiler.h in libv8-dev. (Closes: #589584) -- Jérémy Lal Wed, 14 Jul 2010 18:56:09 +0200 libv8 (2.2.18-1) unstable; urgency=low * New upstream release, followed the procedure of README.source to update. * Drops 0005-constraints-visibility.patch (applied upstream). -- Jérémy Lal Thu, 17 Jun 2010 00:06:53 +0200 libv8 (2.2.7-1) unstable; urgency=low * New upstream release, followed the procedure of README.source to update * debian/patches: + 0005-constraints-visibility.patch: export SetResourceConstraints to allow chromium to compile cleanly -- Antonio Radici Sun, 09 May 2010 21:29:33 +0100 libv8 (2.2.6-1) unstable; urgency=low [ Jérémy Lal ] * New upstream release, previous procedure followed. * Switch to dpkg-source 3.0 (quilt) format. * Uses scons.mk from cdbs. 
[ Antonio Radici ] * debian/control: added Jérémy to the Uploaders * debian/patches: + added mips64 to 0003-platform-detection-x86_64-and-mips.patch -- Antonio Radici Sat, 01 May 2010 20:04:11 +0100 libv8 (2.1.10-1) unstable; urgency=low * New upstream release + debian/rules: SONAME updated + debian/control: updated any reference to the old version + debian/patches/0001-enable_soname.patch: SONAME updated + debian/libv8-$VERSION.install renamed with the new version * debian/patches: + 0001-enable_soname.patch: slightly modified to use the right SONAME + 0004-add-the-install-target.patch: refreshed * debian/README.source: added a procedure to update to a new version once the correct version is imported -- Antonio Radici Sat, 27 Mar 2010 15:12:42 +0000 libv8 (2.1.6-1) unstable; urgency=low [ Antonio Radici ] * New upstream release + package should now build clean with gcc-4.5 (Closes: 565030) + debian/rules: SONAME var updated + debian/libv8-2.0.3.install edited and renamed to libv8-2.1.6.install + debian/control: s/2.0.3/2.1.6/ * debian/control: Standards-Version bumped to 3.8.4 (no changes required) * debian/patches: + refreshed all patches + x86_64_code.patch: changed to drop any reference to ia64 (it does not seem to build there) [ Jérémy Lal ] * debian/copyright: updated to reflect the missing files/dates * debian/patches: + new naming scheme which preserves the ordering + added mips to the x86_64_code.patch -- Antonio Radici Sun, 21 Mar 2010 10:58:49 +0000 libv8 (2.0.3-2) unstable; urgency=low * Modified the upstream soname (from libv8-2.0.3.so to libv8.so.2.0.3) + restored patches/enable_soname.patch + restore the link commands in debian/rules * Added a libv8-2.0.3.install so that the package is not empty anymore + (Closes: 560433) -- Antonio Radici Fri, 11 Dec 2009 20:33:23 +0000 libv8 (2.0.3-1) unstable; urgency=low * New upstream release * restored jsmin.py, now it is DFSG compliant + debian/copyright modified accordingly + debian/repack.sh not needed anymore + debian/patches: remove-jsmin.patch deleted * using the library's SONAME (2.0.3) + debian/control: libv8-0 renamed to libv8-2.0.3 + debian/patches: deleted enable_soname.patch + debian/rules: adapted to support the upstream SONAME + debian/libv8-0.symbols: removed, we will not maintain the symbols file * debian/patches: all patches refreshed against the latest upstream * debian/patches/x86_64_code.patch: support for ia64 -- Antonio Radici Mon, 07 Dec 2009 19:07:51 +0000 libv8 (1.3.11+dfsg-2) unstable; urgency=low * debian/patches: + armel-build.patch: adding march armv5t to build on armel (Closes: 8636294) + x86_64_code.patch: build 64 bit code on x86_64 (Closes: 548467, 550448) * debian/watch: excluded '..' 
from the versions (Closes: 550581) * debian/changelog: added DM-Upload-Allowed: yes -- Antonio Radici Sun, 11 Oct 2009 13:33:11 +0100 libv8 (1.3.11+dfsg-1) unstable; urgency=low * Initial release (Closes: #497701) -- Antonio Radici Wed, 16 Sep 2009 23:01:31 +0100 debian/compat0000664000000000000000000000000212121727233010366 0ustar 7 debian/rules0000775000000000000000000001203612276727730010267 0ustar #!/usr/bin/make -f # This needs to run before inclusion of CDBS snippets debian/control:: debian/control.in debian/watch DEB_PHONY_RULES += debian/control.in debian/control.in:: sed $(foreach re,API ABI ARCHS,-e 's/__$(re)__/$($(re))/g') \ < debian/control.in.in > debian/control.in debian/watch:: sed $(foreach re,API ABI ARCHS,-e 's/__$(re)__/$($(re))/g') \ < debian/control.in.in > debian/control.in include /usr/share/cdbs/1/rules/upstream-tarball.mk include /usr/share/cdbs/1/rules/utils.mk include /usr/share/cdbs/1/class/makefile.mk include /usr/share/cdbs/1/rules/debhelper.mk # See README.source for details on these. MAJOR = $(word 1, $(subst .,$(space), $(DEB_UPSTREAM_VERSION))) MINOR = $(word 2, $(subst .,$(space), $(DEB_UPSTREAM_VERSION))) BUILD = $(word 3, $(subst .,$(space), $(DEB_UPSTREAM_VERSION))) API = $(MAJOR).$(MINOR) ABI = $(MAJOR).$(MINOR).$(BUILD) LIBSTEM = libv8.so watchBranch = $(MAJOR)\\.$(MINOR) DEB_UPSTREAM_URL = https://github.com/v8/v8/archive # TODO: use $(DEB_UPSTREAM_TARBALL_VERSION) when upstream properly tags release DEB_UPSTREAM_TARBALL_BASENAME = 6b10fef46e DEB_UPSTREAM_TARBALL_MD5 = be049128a44adf26f4dfb7d1ab55acfd # Supported archs ARCHS = i386 kfreebsd-i386 amd64 kfreebsd-amd64 armel armhf mipsel powerpc ppc64 ppc64el # allow parallel builds DEB_BUILD_PARALLEL=1 # suppress checking binary files, to not upset dpkg-source DEB_COPYRIGHT_CHECK_IGNORE_REGEX = ^(benchmarks/v8-logo\.png|test/mjsunit/unicode-test\.js|debian/(changelog|copyright(|_hints|_newhints)))$ # dpkg-gensymbols(1) - this is not needed since we are not using symbols # DEB_DH_MAKESHLIBS_ARGS = -- -c4 # map HOST ARCH AND OS to v8 options v8arch := $(or $(v8arch),$(if $(filter i386,$(DEB_HOST_ARCH)),ia32)) v8arch := $(or $(v8arch),$(if $(filter kfreebsd-i386,$(DEB_HOST_ARCH)),ia32)) v8arch := $(or $(v8arch),$(if $(filter amd64,$(DEB_HOST_ARCH)),x64)) v8arch := $(or $(v8arch),$(if $(filter kfreebsd-amd64,$(DEB_HOST_ARCH)),x64)) v8arch := $(or $(v8arch),$(if $(filter armel,$(DEB_HOST_ARCH)),arm)) v8arch := $(or $(v8arch),$(if $(filter armhf,$(DEB_HOST_ARCH)),arm)) v8arch := $(or $(v8arch),$(if $(filter mipsel,$(DEB_HOST_ARCH)),mipsel)) v8arch := $(or $(v8arch),$(if $(filter powerpc,$(DEB_HOST_ARCH)),ppc)) v8arch := $(or $(v8arch),$(if $(filter ppc64el,$(DEB_HOST_ARCH)),ppc64)) v8arch := $(or $(v8arch),$(DEB_HOST_ARCH)) v8os := $(or $(v8os),$(if $(filter linux,$(DEB_HOST_ARCH_OS)),linux)) v8os := $(or $(v8os),$(if $(filter kfreebsd,$(DEB_HOST_ARCH_OS)),freebsd)) v8os := $(or $(v8os),$(DEB_HOST_ARCH_OS)) GYPFLAGS += -Dhost_arch=$(v8arch) -DOS=$(v8os) # the default test timeout in seconds timeOut = 180 # build for loongson, which uses mips3, a sub-instruction-set of mips32r2 ifeq (mipsel, $(DEB_HOST_ARCH)) GYPFLAGS += -Dmips_arch_variant=loongson timeOut = 400 endif # armel and armhf arches need flags to work around those issues : # -fno-tree-sink: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=39604 # -Wno-psabi: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=42748 ifeq (armhf, $(DEB_HOST_ARCH)) CXXFLAGS += -fno-tree-sink CXXFLAGS += -Wno-psabi # enable armv7 vfpv3 GYPFLAGS += -Darmv7=1 -Darm_fpu=vfpv3 
-Darm_neon=0 -Dv8_use_arm_eabi_hardfloat=true endif ifeq (armel, $(DEB_HOST_ARCH)) CXXFLAGS += -fno-tree-sink CXXFLAGS += -Wno-psabi ifeq ($(shell dpkg-vendor --is ubuntu && echo true),true) # Ubuntu targets armv7+ with VFP and thumb2 support by default for armel GYPFLAGS += -Darmv7=1 -Darm_fpu=vfpv3 -Darm_neon=0 -Dv8_use_arm_eabi_hardfloat=false else DEB_MAKE_EXTRA_ARGS += vfp3=off # Disable thumb-interworking because v8 supports it only on >= armv5t. # http://code.google.com/p/v8/issues/detail?id=590 CXXFLAGS += -mno-thumb-interwork # disable armv7, use softfloat GYPFLAGS += -Darmv7=0 -Dv8_use_arm_eabi_hardfloat=false endif endif # hardening gyp CXXFLAGS+=$(CPPFLAGS) export LDFLAGS export CXXFLAGS export GYPFLAGS DEB_MAKE_EXTRA_ARGS += library=shared snapshot=off soname_version=$(ABI) OS=$(v8os) V=1 DEB_MAKE_CLEAN_TARGET = clean DEB_MAKE_BUILD_TARGET = $(v8arch).release v8out = $(CURDIR)/out/$(v8arch).release/lib.target/$(LIBSTEM).$(ABI) # regression tests # * relax regression tests when targeted experimental suite # * run only javascript tests, cctests are for development purposes DEB_MAKE_CHECK_TARGET = $(v8arch).release.check \ LD_PRELOAD=$(v8out) \ TESTFLAGS="--no-presubmit mjsunit message preparser" \ TESTJOBS="$(DEB_MAKE_PARALLEL) --timeout=$(timeOut)" \ $(if $(shell dpkg-parsechangelog | grep -Fx 'Distribution: experimental'),|| true) DEB_DH_INSTALL_ARGS_libv8-$(ABI) = usr/lib/$(LIBSTEM).$(ABI) # Setup dynamically named debhelper install file during build pre-build:: cp -f debian/libv8-dev.install debian/libv8-$(API)-dev.install cp -f debian/libv8-dev.examples debian/libv8-$(API)-dev.examples clean:: rm -f debian/libv8-$(API)-dev.install rm -f debian/libv8-$(API)-dev.examples clean:: rm -rf out find tools/ -name "*.pyc" -exec rm -f '{}' \; rm -f test/*/*.pyc rm -f test/*/*.status2 common-install-impl:: mkdir -p debian/tmp/usr/lib ; \ cd debian/tmp/usr/lib ; \ cp $(v8out) . ; \ ln -s -T $(LIBSTEM).$(ABI) $(LIBSTEM) debian/samples.gyp0000664000000000000000000000407512121727233011363 0ustar # Copyright 2011 the V8 project authors. All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
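# A usage sketch (illustrative, not part of the upstream file): debian/rules
# ships this gyp file together with samples/*.cc and samples/*.js as examples
# in the -dev packages, so the samples can be rebuilt against the packaged
# headers and library. Assuming gyp and a C++ toolchain are installed, and
# that the examples are copied uncompressed into a writable directory (the
# doc path and the out/Default layout are assumptions, not guarantees):
#
#   cp -r /usr/share/doc/libv8-dev/examples v8-samples && cd v8-samples
#   gyp --depth=. -f make samples.gyp   # generate a Makefile for the targets
#   make                                # builds shell, process, lineprocessor
#   ./out/Default/shell                 # run the interactive shell sample
#
# Each target below compiles one sample source and links it against -lv8
# from the runtime package, as declared in 'link_settings'.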
{ 'includes': [], 'target_defaults': { 'type': 'executable', 'dependencies': [], 'include_dirs': [ '/usr/include' ], 'link_settings': { 'libraries': [ '-lv8', ], }, }, 'targets': [ { 'target_name': 'shell', 'sources': [ 'shell.cc', ], }, { 'target_name': 'process', 'sources': [ 'process.cc', ], }, { 'target_name': 'lineprocessor', 'sources': [ 'lineprocessor.cc', ], } ], } debian/control.in.in0000664000000000000000000000373012173166637011624 0ustar Source: libv8-__API__ Priority: optional Maintainer: Debian Javascript Maintainers Uploaders: Jérémy Lal , Jonas Smedegaard Build-Depends: @cdbs@, gyp Standards-Version: 3.9.3 Section: libs Homepage: http://code.google.com/p/v8/ Vcs-Browser: http://anonscm.debian.org/git/collab-maint/libv8.git Vcs-Git: git://anonscm.debian.org/collab-maint/libv8.git Package: libv8-dev Section: libdevel Architecture: __ARCHS__ Depends: libv8-__ABI__ (= ${binary:Version}), ${misc:Depends} Conflicts: libv8-legacy-dev, libv8-3.14-dev Replaces: libv8-legacy-dev, libv8-3.14-dev Description: V8 JavaScript engine - development files for latest branch V8 is a high performance JavaScript engine written in C++. It is used in the web browser Chromium. . This package provide development headers for latest V8 branch. Package: libv8-__API__-dev Section: libdevel Architecture: __ARCHS__ Depends: libv8-__ABI__ (= ${binary:Version}), ${misc:Depends} Provides: libv8-legacy-dev, libv8-dev Conflicts: libv8-dev Replaces: libv8-dev Description: V8 JavaScript engine - development files for __API__ branch V8 is a high performance JavaScript engine written in C++. It is used in the web browser Chromium. . This package provide development headers for V8 __API__ branch. Package: libv8-__ABI__ Architecture: __ARCHS__ Depends: ${shlibs:Depends}, ${misc:Depends} Description: V8 JavaScript engine - runtime library V8 is a high performance JavaScript engine written in C++. It is used in the web browser Chromium. . This package provides the dynamic library for V8. Package: libv8-__API__-dbg Priority: extra Section: debug Architecture: __ARCHS__ Depends: libv8-__ABI__ (= ${binary:Version}), ${misc:Depends} Description: V8 JavaScript engine - debugging symbols V8 is a high performance JavaScript engine written in C++. It is used in the web browser Chromium. . This package provides the debugging symbols for the library. debian/control0000664000000000000000000000453312276727361010615 0ustar Source: libv8-3.14 Priority: optional Maintainer: Ubuntu Developers XSBC-Original-Maintainer: Debian Javascript Maintainers Uploaders: Jérémy Lal , Jonas Smedegaard Build-Depends: cdbs, devscripts, debhelper, dh-buildinfo, gyp Standards-Version: 3.9.3 Section: libs Homepage: http://code.google.com/p/v8/ Vcs-Browser: http://anonscm.debian.org/git/collab-maint/libv8.git Vcs-Git: git://anonscm.debian.org/collab-maint/libv8.git Package: libv8-dev Section: libdevel Architecture: i386 kfreebsd-i386 amd64 kfreebsd-amd64 armel armhf mipsel powerpc ppc64 ppc64el Depends: libv8-3.14.5 (= ${binary:Version}), ${misc:Depends} Conflicts: libv8-legacy-dev, libv8-3.14-dev Replaces: libv8-legacy-dev, libv8-3.14-dev Description: V8 JavaScript engine - development files for latest branch V8 is a high performance JavaScript engine written in C++. It is used in the web browser Chromium. . This package provide development headers for latest V8 branch. 
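The generated control stanzas here come from the control.in.in template above
(via control.in): debian/rules splits the upstream version into MAJOR.MINOR
(API) and MAJOR.MINOR.BUILD (ABI) and substitutes the __API__, __ABI__ and
__ARCHS__ placeholders. A minimal shell sketch of that expansion for the
3.14.5.8 upstream version (the sed call mirrors the $(foreach ...) line in
debian/rules; the values shown are the ones visible in the resulting control):

  API=3.14      # MAJOR.MINOR        -> libv8-3.14-dev, libv8-3.14-dbg
  ABI=3.14.5    # MAJOR.MINOR.BUILD  -> libv8-3.14.5 and SONAME libv8.so.3.14.5
  ARCHS="i386 kfreebsd-i386 amd64 kfreebsd-amd64 armel armhf mipsel powerpc ppc64 ppc64el"
  sed -e "s/__API__/$API/g" -e "s/__ABI__/$ABI/g" -e "s/__ARCHS__/$ARCHS/g" \
      < debian/control.in.in > debian/control.in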
Package: libv8-3.14-dev Section: libdevel Architecture: i386 kfreebsd-i386 amd64 kfreebsd-amd64 armel armhf mipsel powerpc ppc64 ppc64el Depends: libv8-3.14.5 (= ${binary:Version}), ${misc:Depends} Provides: libv8-legacy-dev, libv8-dev Conflicts: libv8-dev Replaces: libv8-dev Description: V8 JavaScript engine - development files for 3.14 branch V8 is a high performance JavaScript engine written in C++. It is used in the web browser Chromium. . This package provide development headers for V8 3.14 branch. Package: libv8-3.14.5 Architecture: i386 kfreebsd-i386 amd64 kfreebsd-amd64 armel armhf mipsel powerpc ppc64 ppc64el Depends: ${shlibs:Depends}, ${misc:Depends} Description: V8 JavaScript engine - runtime library V8 is a high performance JavaScript engine written in C++. It is used in the web browser Chromium. . This package provides the dynamic library for V8. Package: libv8-3.14-dbg Priority: extra Section: debug Architecture: i386 kfreebsd-i386 amd64 kfreebsd-amd64 armel armhf mipsel powerpc ppc64 ppc64el Depends: libv8-3.14.5 (= ${binary:Version}), ${misc:Depends} Description: V8 JavaScript engine - debugging symbols V8 is a high performance JavaScript engine written in C++. It is used in the web browser Chromium. . This package provides the debugging symbols for the library. debian/libv8-dev.examples0000664000000000000000000000005512166012720012525 0ustar samples/*.cc samples/*.js debian/samples.gyp debian/README.source0000664000000000000000000000424312166024074011354 0ustar CDBS+git-buildpackage ===================== This source package uses CDBS and git-buildpackage. NMUs need not (but are encouraged to) make special use of these tools. In particular, the debian/control.in file can be completely ignored. More info here: http://wiki.debian.org/CDBS+git-buildpackage Branches, versions, SONAME ========================== v8 upstream releases versions as major.minor.build.patch. A branch matches a major.minor version (like the 3.8 branch). Given a branch, when the build number stays constant it means it can be considered stable, meaning API/ABI won't change. Examples from http://v8.googlecode.com/svn/tags/ : 3.6.6.4 - 3.6.6.25 3.7.12.7 - 3.7.12.30 3.8.9.1 - 3.8.9.23 3.9.24.1 - 3.9.24.29 3.10.8.1 - 3.10.8.13 To reflect that upstream practice, the debian package use a SONAME built upon major.minor.build version, since version 3.10.8. This gives : libv8.so.3.10.8 This allows easier "patch" updates to libv8, avoiding the need to rebuild all libv8 reverse dependencies. Previous soname would have been libv8.so.3.10.8.14. Important: it is certain that a change in minor version introduces API and ABI breakage. Never define a libv8.so.3 soname ! However, upstream doesn't *guarantee* ABI compatibility, so each patch-level update should be uploaded to experimental first, to make sure any accidental ABI break does not happen. Side note: http://www.upstream-tracker.org/versions/v8.html gives valuable information about upstream involuntarily breaking API/ABI, like what happened with 3.7.12.31, fixed in 3.7.12.32. gbp.conf and branched versions ============================== v8 stable versions (upstream /branches/), are maintained in git-buildpackage, and are mapped to : * (master, upstream) for unstable releases, passing all tests on all archs; * (master-experimental, upstream-experimental) for latest upstream branch releases, tests are disabled because some might fail. * (master-stable, upstream-stable) for libv8 in debian/stable. debian/gbp.conf is configured for each debian branch. 
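A sketch of how these branches are used with git-buildpackage, assuming the
layout described above (the --git-* options are standard git-buildpackage
ones; this is illustrative, not a record of how any particular upload was
built):

  # unstable upload, using the branches configured in debian/gbp.conf
  git checkout master
  git-buildpackage
  # experimental upload, overriding the branches instead of editing gbp.conf
  git checkout master-experimental
  git-buildpackage --git-debian-branch=master-experimental \
      --git-upstream-branch=upstream-experimental --git-pristine-tar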
When moving experimental to unstable layout, gbp.conf is typically overwritten by merge. It needs to be restored, until #671791 feature is implemented. debian/watch0000664000000000000000000000016512160515514010223 0ustar # Compulsory line, this is a version 3 file version=3 https://github.com/v8/v8/tags .*/archive/(3\.14[\d.]*).tar.gz debian/gbp.conf0000664000000000000000000000051112160515514010604 0ustar [DEFAULT] # master and upstream branches are uploaded to unstable upstream-branch = upstream debian-branch = master # there are separate branches for stable and experimental #upstream-branch = upstream-experimental #debian-branch = master-experimental pristine-tar = True sign-tags = True [git-import-orig] filter = .gitignore debian/patches/0000775000000000000000000000000012277012246010622 5ustar debian/patches/0013_gcc_48_compat.patch0000664000000000000000000000216212166023661014721 0ustar Description: gcc 4.8 compatibility fixes Bug: https://code.google.com/p/v8/issues/detail?id=2149 Bug: https://code.google.com/p/v8/issues/detail?id=2767 Bug-Debian: http://bugs.debian.org/701312 Author: Jérémy Lal Last-Update: 2013-07-06 --- a/src/checks.h +++ b/src/checks.h @@ -248,7 +248,7 @@ #define STATIC_CHECK(test) \ typedef \ StaticAssertionHelper((test))>)> \ - SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__) + SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__) __attribute__((unused)) extern bool FLAG_enable_slow_asserts; --- a/test/cctest/test-macro-assembler-x64.cc +++ b/test/cctest/test-macro-assembler-x64.cc @@ -2185,7 +2185,7 @@ TEST(OperandOffset) { v8::internal::V8::Initialize(NULL); int data[256]; - for (int i = 0; i < 256; i++) { data[i] = i * 0x01010101; } + for (int i = 0; i < 256; i++) { data[i] = (long)i * 0x01010101; } // Allocate an executable page of memory. size_t actual_size; debian/patches/0008_mksnapshot_stdout.patch0000664000000000000000000000111712160515514016100 0ustar Description: Redirect mksnapshot log to stdout armel builds typically fail at mksnapshot, for which it is useful to be able to get the actual log. Forwarded: not-needed Author: Jérémy Lal Last-Update: 2011-10-25 --- a/tools/gyp/v8.gyp +++ b/tools/gyp/v8.gyp @@ -136,7 +136,7 @@ 'variables': { 'mksnapshot_flags': [ '--log-snapshot-positions', - '--logfile', '<(INTERMEDIATE_DIR)/snapshot.log', + '--logfile', '-', ], }, 'conditions': [ debian/patches/0012_loongson_force_cache_flush.patch0000664000000000000000000000171212154461015017642 0ustar Description: Forced whole instruction cache flushing on Loongson. Workaround for instruction cache flushing malfunction on Loongson systems that occasionally cause failures under stress test conditions. Author: Dusan Milosavljevic Origin:upstream,https://github.com/paul99/v8m-rb/commit/ded6c2c2.patch Last-Update: 2012-06-13 --- a/src/mips/cpu-mips.cc +++ b/src/mips/cpu-mips.cc @@ -72,6 +72,13 @@ #else // ANDROID int res; // See http://www.linux-mips.org/wiki/Cacheflush_Syscall. + if (kArchVariant==kLoongson) { + // Force flushing of whole instruction cache on Loongson. This is a + // workaround for problem when under stress tests cache lines are not + // flushed through syscall for some reasons. 
+ size_t iCacheSize = 64 * KB; + size = iCacheSize + 1; + } res = syscall(__NR_cacheflush, start, size, ICACHE); if (res) { V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache"); debian/patches/0099_powerpc_support.patch0000664000000000000000000756105012314735321015613 0ustar Description: Backport powerpc/ppc64/ppc64el support to 3.14 Author: Andrew Low Reviewed-By: Adam Conrad Last-Update: 2014-02-12 To reproduce this patch: git clone https://github.com/andrewlow/v8ppc.git && cd v8ppc git diff 6b10fef46edfb4dc2a7aed389d75574c40a14243 7d1b8b5d040a7d622bbde464ea29f6a7e89c90b6 Edit to remove hunk in src/checks.h that overlaps with 0013_gcc_48_compat.patch diff --git a/.gitignore b/.gitignore index 0bf9313..74057ea 100644 --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,7 @@ *.sln *.so *.suo +*.swp *.user *.vcproj *.xcodeproj diff --git a/AUTHORS b/AUTHORS index 9c43bb5..ae029fe 100644 --- a/AUTHORS +++ b/AUTHORS @@ -7,6 +7,7 @@ Google Inc. Sigma Designs Inc. ARM Ltd. Hewlett-Packard Development Company, LP +IBM Corporation Igalia, S.L. Joyent, Inc. @@ -15,11 +16,13 @@ Alexander Botero-Lowry Alexander Karpinsky Alexandre Vassalotti Andreas Anyuru +Andrew Low Bert Belder Burcu Dogan Craig Schlenter Daniel Andersson Daniel James +David Eelsohn Derek J Conrod Dineel D Sule Erich Ocean @@ -37,6 +40,7 @@ Kun Zhang Martyn Capewell Mathias Bynens Matt Hanselman +Matthew Brandyberry Maxim Mossienko Michael Lutz Michael Smith diff --git a/Makefile b/Makefile index b65ea4c..95631dd 100644 --- a/Makefile +++ b/Makefile @@ -73,6 +73,10 @@ endif ifeq ($(extrachecks), off) GYPFLAGS += -Dv8_enable_extra_checks=0 endif +# extrachecks=off +ifeq ($(extrappcchecks), on) + GYPFLAGS += -Dv8_enable_extra_ppcchecks=1 +endif # gdbjit=on ifeq ($(gdbjit), on) GYPFLAGS += -Dv8_enable_gdbjit=1 @@ -115,6 +119,10 @@ endif ifeq ($(hardfp), on) GYPFLAGS += -Dv8_use_arm_eabi_hardfloat=true endif +# nativesim=true +ifeq ($(nativesim), true) + GYPFLAGS += -Dv8_native_sim=true +endif # ----------------- available targets: -------------------- # - "dependencies": pulls in external dependencies (currently: GYP) @@ -133,8 +141,8 @@ endif # Architectures and modes to be compiled. Consider these to be internal # variables, don't override them (use the targets instead). -ARCHES = ia32 x64 arm mipsel -DEFAULT_ARCHES = ia32 x64 arm +ARCHES = ia32 x64 arm ppc mipsel ppc64 +DEFAULT_ARCHES = ia32 x64 arm ppc ppc64 MODES = release debug ANDROID_ARCHES = android_ia32 android_arm diff --git a/README.md b/README.md new file mode 100644 index 0000000..5cdb418 --- /dev/null +++ b/README.md @@ -0,0 +1,10 @@ +v8ppc +===== + +Port of Google V8 javascript engine to PowerPC - PowerLinux and AIX. 
+ +This branch of the code (libv8-3.14) is intended to match the 3.14.5.8 +level of V8 that is used by the libv8 library built as part of Ubuntu + +http://packages.ubuntu.com/source/trusty/libv8-3.14 + diff --git a/aix_gyp.patch b/aix_gyp.patch new file mode 100644 index 0000000..810132d --- /dev/null +++ b/aix_gyp.patch @@ -0,0 +1,62 @@ +--- build/gyp/pylib/gyp/common.py ++++ build/gyp/pylib/gyp/common.py +@@ -378,6 +378,8 @@ + return 'solaris' + if sys.platform.startswith('freebsd'): + return 'freebsd' ++ if sys.platform.startswith('aix'): ++ return 'aix' + + return 'linux' + +--- build/gyp/pylib/gyp/generator/make.py ++++ build/gyp/pylib/gyp/generator/make.py +@@ -200,6 +200,21 @@ + """ + + ++ LINK_COMMANDS_AIX = """\ ++ quiet_cmd_alink = AR($(TOOLSET)) $@ ++ cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) $(ARFLAGS.$(TOOLSET)) $@ $(filter %.o,$^) ++ ++ quiet_cmd_link = LINK($(TOOLSET)) $@ ++ cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS) ++ ++ quiet_cmd_solink = SOLINK($(TOOLSET)) $@ ++ cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS) ++ ++ quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@ ++ cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS) ++ """ ++ ++ + # Header of toplevel Makefile. + # This should go into the build tree, but it's easier to keep it here for now. + SHARED_HEADER = ("""\ + +--- build/gyp/pylib/gyp/generator/make.py ++++ build/gyp/pylib/gyp/generator/make.py +@@ -1933,6 +1948,10 @@ + cc_command=cc_host): + arflags_host = 'crsT' + ++ if flavor == 'aix': ++ arflags_target = '-Xany ' + arflags_target ++ arflags_host = '-Xany ' + arflags_host ++ + return { 'ARFLAGS.target': arflags_target, + 'ARFLAGS.host': arflags_host } + +--- build/gyp/pylib/gyp/generator/make.py ++++ build/gyp/pylib/gyp/generator/make.py +@@ -2026,6 +2045,10 @@ + elif flavor == 'freebsd': + header_params.update({ + 'flock': 'lockf', ++ }) ++ elif flavor == 'aix': ++ header_params.update({ ++ 'link_commands': LINK_COMMANDS_AIX, + }) + + header_params.update(RunSystemTests(flavor)) diff --git a/build/common.gypi b/build/common.gypi index 78888b8..4f046c0 100644 --- a/build/common.gypi +++ b/build/common.gypi @@ -70,9 +70,13 @@ 'v8_enable_disassembler%': 0, + 'v8_native_sim%': 'false', + # Enable extra checks in API functions and other strategic places. 
'v8_enable_extra_checks%': 1, + 'v8_enable_extra_ppcchecks%': 0, + 'v8_enable_gdbjit%': 0, 'v8_object_print%': 0, @@ -117,6 +121,9 @@ ['v8_enable_extra_checks==1', { 'defines': ['ENABLE_EXTRA_CHECKS',], }], + ['v8_enable_extra_ppcchecks==1', { + 'defines': ['ENABLE_EXTRA_PPCCHECKS',], + }], ['v8_enable_gdbjit==1', { 'defines': ['ENABLE_GDB_JIT_INTERFACE',], }], @@ -129,6 +136,12 @@ ['v8_interpreted_regexp==1', { 'defines': ['V8_INTERPRETED_REGEXP',], }], + ['v8_native_sim=="true"', { + 'defines': [ + 'NATIVE_SIMULATION', + 'USE_SIMULATOR', + ], + }], ['v8_target_arch=="arm"', { 'defines': [ 'V8_TARGET_ARCH_ARM', @@ -171,6 +184,17 @@ }], ], }], # v8_target_arch=="arm" + ['v8_target_arch=="ppc"', { + 'defines': [ + 'V8_TARGET_ARCH_PPC', + ], + }], # v8_target_arch=="ppc" + ['v8_target_arch=="ppc64"', { + 'defines': [ + 'V8_TARGET_ARCH_PPC', + 'V8_TARGET_ARCH_PPC64', + ], + }], # v8_target_arch=="ppc64" ['v8_target_arch=="ia32"', { 'defines': [ 'V8_TARGET_ARCH_IA32', @@ -277,7 +301,7 @@ }, }], ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \ - or OS=="netbsd"', { + or OS=="netbsd" or OS=="aix"', { 'conditions': [ [ 'v8_no_strict_aliasing==1', { 'cflags': [ '-fno-strict-aliasing' ], @@ -290,7 +314,7 @@ ['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \ or OS=="netbsd" or OS=="mac" or OS=="android") and \ (v8_target_arch=="arm" or v8_target_arch=="ia32" or \ - v8_target_arch=="mipsel")', { + v8_target_arch=="mipsel" or v8_target_arch=="ppc")', { # Check whether the host compiler and target compiler support the # '-m32' option and set it if so. 'target_conditions': [ @@ -327,6 +351,20 @@ ['OS=="netbsd"', { 'cflags': [ '-I/usr/pkg/include' ], }], + ['OS=="aix"', { + # AIX is missing /usr/include/endian.h + 'defines': [ + '__LITTLE_ENDIAN=1234', + '__BIG_ENDIAN=4321', + '__BYTE_ORDER=__BIG_ENDIAN', + '__FLOAT_WORD_ORDER=__BIG_ENDIAN'], + 'conditions': [ + [ 'v8_target_arch=="ppc64"', { + 'cflags': [ '-maix64' ], + 'ldflags': [ '-maix64' ], + }], + ], + }], ], # conditions 'configurations': { 'Debug': { @@ -354,10 +392,14 @@ }, }, 'conditions': [ - ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', { + ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" \ + or OS=="aix"', { 'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter', '-Wnon-virtual-dtor', '-Woverloaded-virtual' ], }], + ['OS=="aix"', { + 'ldflags': [ '-Wl,-bbigtoc' ], + }], ['OS=="android"', { 'variables': { 'android_full_debug%': 1, @@ -377,7 +419,7 @@ 'Release': { 'conditions': [ ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" \ - or OS=="android"', { + or OS=="android" or OS=="aix"', { 'cflags!': [ '-O2', '-Os', diff --git a/build/standalone.gypi b/build/standalone.gypi index 7145a16..8389ef4 100644 --- a/build/standalone.gypi +++ b/build/standalone.gypi @@ -31,7 +31,6 @@ 'variables': { 'library%': 'static_library', 'component%': 'static_library', - 'visibility%': 'hidden', 'msvs_multi_core_compile%': '1', 'mac_deployment_target%': '10.5', 'variables': { @@ -39,7 +38,7 @@ 'variables': { 'conditions': [ ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or \ - OS=="netbsd" or OS=="mac"', { + OS=="netbsd" or OS=="mac" or OS=="aix"', { # This handles the Unix platforms we generally deal with. 
# Anything else gets passed through, which probably won't work # very well; such hosts should pass an explicit target_arch @@ -49,7 +48,7 @@ s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/;s/mips.*/mipsel/")', }, { # OS!="linux" and OS!="freebsd" and OS!="openbsd" and - # OS!="netbsd" and OS!="mac" + # OS!="netbsd" and OS!="mac" and OS!="aix" 'host_arch%': 'ia32', }], ], @@ -74,6 +73,12 @@ }, { 'want_separate_host_toolset': 0, }], + # + ['OS=="aix"', { + 'visibility%': '', + }, { + 'visibility%': 'hidden', + }], ], # Default ARM variable settings. 'armv7%': 1, @@ -85,12 +90,17 @@ 'configurations': { 'Debug': { 'cflags': [ '-g', '-O0' ], + 'conditions': [ + [ 'OS=="aix"', { + 'cflags': [ '-gxcoff' ], + }], + ], }, }, }, 'conditions': [ ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \ - or OS=="netbsd"', { + or OS=="netbsd" or OS=="aix"', { 'target_defaults': { 'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter', '-Wnon-virtual-dtor', '-pthread', '-fno-rtti', @@ -100,6 +110,9 @@ [ 'OS=="linux"', { 'cflags': [ '-ansi' ], }], + [ 'host_arch=="ppc64"', { + 'cflags': [ '-mminimal-toc' ], + }], [ 'visibility=="hidden"', { 'cflags': [ '-fvisibility=hidden' ], }], diff --git a/src/assembler.cc b/src/assembler.cc index d81d4ae..10ef522 100644 --- a/src/assembler.cc +++ b/src/assembler.cc @@ -61,6 +61,8 @@ #include "x64/assembler-x64-inl.h" #elif V8_TARGET_ARCH_ARM #include "arm/assembler-arm-inl.h" +#elif V8_TARGET_ARCH_PPC +#include "ppc/assembler-ppc-inl.h" #elif V8_TARGET_ARCH_MIPS #include "mips/assembler-mips-inl.h" #else @@ -75,6 +77,8 @@ #include "x64/regexp-macro-assembler-x64.h" #elif V8_TARGET_ARCH_ARM #include "arm/regexp-macro-assembler-arm.h" +#elif V8_TARGET_ARCH_PPC +#include "ppc/regexp-macro-assembler-ppc.h" #elif V8_TARGET_ARCH_MIPS #include "mips/regexp-macro-assembler-mips.h" #else // Unknown architecture. @@ -1064,6 +1068,8 @@ ExternalReference ExternalReference::re_check_stack_guard_state( function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState); #elif V8_TARGET_ARCH_ARM function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState); +#elif V8_TARGET_ARCH_PPC + function = FUNCTION_ADDR(RegExpMacroAssemblerPPC::CheckStackGuardState); #elif V8_TARGET_ARCH_MIPS function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState); #else @@ -1224,6 +1230,21 @@ double power_double_double(double x, double y) { int y_int = static_cast(y); if (y == y_int) return ldexp(1.0, y_int); } +#elif defined(_AIX) + // AIX has a custom implementation for pow. This handles certain + // special cases that are different. + if ((x == 0.0 || isinf(x)) && y != 0.0 && isfinite(y)) { + double f; + double result = ((x == 0.0) ^ (y > 0)) ? V8_INFINITY : 0; + /* retain sign if odd integer exponent */ + return ((modf(y, &f) == 0.0) && (static_cast(y) & 1)) ? 
+ copysign(result, x) : result; + } + + if (x == 2.0) { + int y_int = static_cast(y); + if (y == y_int) return ldexp(1.0, y_int); + } #endif // The checks for special cases can be dropped in ia32 because it has already diff --git a/src/assembler.h b/src/assembler.h index a0e55cc..f72c8d3 100644 --- a/src/assembler.h +++ b/src/assembler.h @@ -829,31 +829,33 @@ class PreservePositionScope BASE_EMBEDDED { // ----------------------------------------------------------------------------- // Utility functions -inline bool is_intn(int x, int n) { - return -(1 << (n-1)) <= x && x < (1 << (n-1)); +inline bool is_intn(intptr_t x, int n) { + return -(1L << (n-1)) <= x && x < (1L << (n-1)); } -inline bool is_int8(int x) { return is_intn(x, 8); } -inline bool is_int16(int x) { return is_intn(x, 16); } -inline bool is_int18(int x) { return is_intn(x, 18); } -inline bool is_int24(int x) { return is_intn(x, 24); } +inline bool is_int8(intptr_t x) { return is_intn(x, 8); } +inline bool is_int16(intptr_t x) { return is_intn(x, 16); } +inline bool is_int18(intptr_t x) { return is_intn(x, 18); } +inline bool is_int24(intptr_t x) { return is_intn(x, 24); } +inline bool is_int26(intptr_t x) { return is_intn(x, 26); } -inline bool is_uintn(int x, int n) { - return (x & -(1 << n)) == 0; + +inline bool is_uintn(intptr_t x, int n) { + return (x & -(1L << n)) == 0; } -inline bool is_uint2(int x) { return is_uintn(x, 2); } -inline bool is_uint3(int x) { return is_uintn(x, 3); } -inline bool is_uint4(int x) { return is_uintn(x, 4); } -inline bool is_uint5(int x) { return is_uintn(x, 5); } -inline bool is_uint6(int x) { return is_uintn(x, 6); } -inline bool is_uint8(int x) { return is_uintn(x, 8); } -inline bool is_uint10(int x) { return is_uintn(x, 10); } -inline bool is_uint12(int x) { return is_uintn(x, 12); } -inline bool is_uint16(int x) { return is_uintn(x, 16); } -inline bool is_uint24(int x) { return is_uintn(x, 24); } -inline bool is_uint26(int x) { return is_uintn(x, 26); } -inline bool is_uint28(int x) { return is_uintn(x, 28); } +inline bool is_uint2(intptr_t x) { return is_uintn(x, 2); } +inline bool is_uint3(intptr_t x) { return is_uintn(x, 3); } +inline bool is_uint4(intptr_t x) { return is_uintn(x, 4); } +inline bool is_uint5(intptr_t x) { return is_uintn(x, 5); } +inline bool is_uint6(intptr_t x) { return is_uintn(x, 6); } +inline bool is_uint8(intptr_t x) { return is_uintn(x, 8); } +inline bool is_uint10(intptr_t x) { return is_uintn(x, 10); } +inline bool is_uint12(intptr_t x) { return is_uintn(x, 12); } +inline bool is_uint16(intptr_t x) { return is_uintn(x, 16); } +inline bool is_uint24(intptr_t x) { return is_uintn(x, 24); } +inline bool is_uint26(intptr_t x) { return is_uintn(x, 26); } +inline bool is_uint28(intptr_t x) { return is_uintn(x, 28); } inline int NumberOfBitsSet(uint32_t x) { unsigned int num_bits_set; diff --git a/src/atomicops.h b/src/atomicops.h index 1f0c44a..8c92f07 100644 --- a/src/atomicops.h +++ b/src/atomicops.h @@ -69,7 +69,8 @@ typedef intptr_t Atomic64; // Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or // Atomic64 routines below, depending on your architecture. 
-#if defined(__OpenBSD__) && defined(__i386__) +#if !defined(V8_HOST_ARCH_64_BIT) && \ + ((defined(__OpenBSD__) && defined(__i386__)) || defined(_AIX)) typedef Atomic32 AtomicWord; #else typedef intptr_t AtomicWord; @@ -162,6 +163,8 @@ Atomic64 Release_Load(volatile const Atomic64* ptr); #include "atomicops_internals_x86_gcc.h" #elif defined(__GNUC__) && defined(V8_HOST_ARCH_ARM) #include "atomicops_internals_arm_gcc.h" +#elif defined(__GNUC__) && defined(V8_HOST_ARCH_PPC) +#include "atomicops_internals_ppc_gcc.h" #elif defined(__GNUC__) && defined(V8_HOST_ARCH_MIPS) #include "atomicops_internals_mips_gcc.h" #else diff --git a/src/atomicops_internals_ppc_gcc.h b/src/atomicops_internals_ppc_gcc.h new file mode 100644 index 0000000..b4543f4 --- /dev/null +++ b/src/atomicops_internals_ppc_gcc.h @@ -0,0 +1,167 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// This file is an internal atomic implementation, use atomicops.h instead. +// + +#ifndef V8_ATOMICOPS_INTERNALS_PPC_H_ +#define V8_ATOMICOPS_INTERNALS_PPC_H_ + +namespace v8 { +namespace internal { + +inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + return(__sync_val_compare_and_swap( ptr, old_value, new_value)); +} + +inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, + Atomic32 new_value) { + Atomic32 old_value; + do { + old_value = *ptr; + } while (__sync_bool_compare_and_swap(ptr, old_value, new_value)); + return old_value; +} + +inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, + Atomic32 increment) { + return Barrier_AtomicIncrement(ptr, increment); +} + +inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, + Atomic32 increment) { + for (;;) { + Atomic32 old_value = *ptr; + Atomic32 new_value = old_value + increment; + if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) { + return new_value; + // The exchange took place as expected. + } + // Otherwise, *ptr changed mid-loop and we need to retry. 
+ } +} + +inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + return NoBarrier_CompareAndSwap(ptr, old_value, new_value); +} + +inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + return NoBarrier_CompareAndSwap(ptr, old_value, new_value); +} + +inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { + *ptr = value; +} + +inline void MemoryBarrier() { + __asm__ __volatile__("sync" : : : "memory"); +} + +inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { + *ptr = value; + MemoryBarrier(); +} + +inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { + MemoryBarrier(); + *ptr = value; +} + +inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { + return *ptr; +} + +inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { + Atomic32 value = *ptr; + MemoryBarrier(); + return value; +} + +inline Atomic32 Release_Load(volatile const Atomic32* ptr) { + MemoryBarrier(); + return *ptr; +} + +#ifdef V8_TARGET_ARCH_PPC64 +inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + return(__sync_val_compare_and_swap( ptr, old_value, new_value)); +} + +inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + return NoBarrier_CompareAndSwap(ptr, old_value, new_value); +} + +inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + return NoBarrier_CompareAndSwap(ptr, old_value, new_value); +} + +inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { + *ptr = value; +} + +inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { + *ptr = value; + MemoryBarrier(); +} + +inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { + MemoryBarrier(); + *ptr = value; +} + +inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { + return *ptr; +} + +inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { + Atomic64 value = *ptr; + MemoryBarrier(); + return value; +} + +inline Atomic64 Release_Load(volatile const Atomic64* ptr) { + MemoryBarrier(); + return *ptr; +} + +#endif + +} } // namespace v8::internal + +#endif // V8_ATOMICOPS_INTERNALS_PPC_GCC_H_ diff --git a/src/builtins.cc b/src/builtins.cc index df70cd4..de0bb41 100644 --- a/src/builtins.cc +++ b/src/builtins.cc @@ -1617,10 +1617,15 @@ void Builtins::SetUp(bool create_heap_objects) { const BuiltinDesc* functions = builtin_function_table.functions(); +#if V8_TARGET_ARCH_PPC64 + const int kBufferSize = 9 * KB; +#else + const int kBufferSize = 8 * KB; +#endif // For now we generate builtin adaptor code into a stack-allocated // buffer, before copying it into individual code objects. Be careful // with alignment, some platforms don't like unaligned code. - union { int force_alignment; byte buffer[8*KB]; } u; + union { int force_alignment; byte buffer[kBufferSize]; } u; // Traverse the list of builtins and generate an adaptor in a // separate code object for each one. 
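An editorial aside, not part of this patch: the PPC atomics added above
implement read-modify-write operations as a compare-and-swap retry loop over
GCC's __sync builtins. The standalone sketch below shows that pattern in
isolation; AtomicIncrement is a hypothetical name and this is only an
illustration of the technique, not code taken from the port.

// Illustrative sketch only; mirrors the retry loop used by
// Barrier_AtomicIncrement in atomicops_internals_ppc_gcc.h above.
#include <cstdio>

typedef int Atomic32;

static Atomic32 AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment) {
  for (;;) {
    Atomic32 old_value = *ptr;
    Atomic32 new_value = old_value + increment;
    if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) {
      return new_value;  // The exchange took place as expected.
    }
    // Otherwise *ptr changed between the read and the swap; retry.
  }
}

int main() {
  volatile Atomic32 counter = 0;
  for (int i = 0; i < 5; ++i) AtomicIncrement(&counter, 2);
  std::printf("counter = %d\n", counter);  // prints: counter = 10
  return 0;
}

The port pairs these primitives with "sync" memory barriers (MemoryBarrier
above) where acquire/release ordering is required.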
diff --git a/src/checks.h b/src/checks.h index d0a0c2b..15abc89 100644 --- a/src/checks.h +++ b/src/checks.h @@ -292,4 +292,18 @@ extern bool FLAG_enable_slow_asserts; #define EXTRA_CHECK(condition) ((void) 0) #endif +// PENGUIN: Extra checks for PPC PORT +// - PPCPORT_UNIMPLEMENTED: for unimplemented features +// - PPCPORT_CHECK: for development phase +// - PPCPORT_UNSAFE_IMPLEMENTATION: unsafe implementation +#ifdef ENABLE_EXTRA_PPCCHECKS +#define PPCPORT_CHECK(condition) CHECK(condition) +#define PPCPORT_UNIMPLEMENTED() UNIMPLEMENTED() +#define PPCPORT_UNSAFE_IMPLEMENTATION() ((void)0) +#else +#define PPCPORT_CHECK(condition) ((void) 0) +#define PPCPORT_UNIMPLEMENTED() ((void) 0) +#define PPCPORT_UNSAFE_IMPLEMENTATION() ((void)0) +#endif + #endif // V8_CHECKS_H_ diff --git a/src/code-stubs.h b/src/code-stubs.h index a843841..7ac3cae 100644 --- a/src/code-stubs.h +++ b/src/code-stubs.h @@ -88,6 +88,18 @@ namespace internal { #define CODE_STUB_LIST_ARM(V) #endif +// List of code stubs only used on PPC platforms. +#ifdef V8_TARGET_ARCH_PPC +#define CODE_STUB_LIST_PPC(V) \ + V(GetProperty) \ + V(SetProperty) \ + V(InvokeBuiltin) \ + V(RegExpCEntry) \ + V(DirectCEntry) +#else +#define CODE_STUB_LIST_PPC(V) +#endif + // List of code stubs only used on MIPS platforms. #ifdef V8_TARGET_ARCH_MIPS #define CODE_STUB_LIST_MIPS(V) \ @@ -101,6 +113,7 @@ namespace internal { #define CODE_STUB_LIST(V) \ CODE_STUB_LIST_ALL_PLATFORMS(V) \ CODE_STUB_LIST_ARM(V) \ + CODE_STUB_LIST_PPC(V) \ CODE_STUB_LIST_MIPS(V) // Mode to overwrite BinaryExpression values. @@ -254,6 +267,8 @@ class RuntimeCallHelper { #include "x64/code-stubs-x64.h" #elif V8_TARGET_ARCH_ARM #include "arm/code-stubs-arm.h" +#elif V8_TARGET_ARCH_PPC +#include "ppc/code-stubs-ppc.h" #elif V8_TARGET_ARCH_MIPS #include "mips/code-stubs-mips.h" #else diff --git a/src/codegen.cc b/src/codegen.cc index 0163580..52cf992 100644 --- a/src/codegen.cc +++ b/src/codegen.cc @@ -200,7 +200,7 @@ void ArgumentsAccessStub::Generate(MacroAssembler* masm) { int CEntryStub::MinorKey() { int result = (save_doubles_ == kSaveFPRegs) ? 1 : 0; ASSERT(result_size_ == 1 || result_size_ == 2); -#ifdef _WIN64 +#if defined(_WIN64) || defined(V8_TARGET_ARCH_PPC64) return result | ((result_size_ == 1) ? 0 : 2); #else return result; diff --git a/src/codegen.h b/src/codegen.h index 08a777f..6eef38b 100644 --- a/src/codegen.h +++ b/src/codegen.h @@ -75,6 +75,8 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF }; #include "x64/codegen-x64.h" #elif V8_TARGET_ARCH_ARM #include "arm/codegen-arm.h" +#elif V8_TARGET_ARCH_PPC +#include "ppc/codegen-ppc.h" #elif V8_TARGET_ARCH_MIPS #include "mips/codegen-mips.h" #else diff --git a/src/conversions-inl.h b/src/conversions-inl.h index e272fe6..2872ee9 100644 --- a/src/conversions-inl.h +++ b/src/conversions-inl.h @@ -75,7 +75,11 @@ inline unsigned int FastD2UI(double x) { if (x < k2Pow52) { x += k2Pow52; uint32_t result; +#if __FLOAT_WORD_ORDER == __LITTLE_ENDIAN Address mantissa_ptr = reinterpret_cast
(&x); +#else + Address mantissa_ptr = reinterpret_cast
(&x) + kIntSize; +#endif // Copy least significant 32 bits of mantissa. memcpy(&result, mantissa_ptr, sizeof(result)); return negative ? ~result + 1 : result; diff --git a/src/d8.gyp b/src/d8.gyp index a8361e6..4fe8671 100644 --- a/src/d8.gyp +++ b/src/d8.gyp @@ -62,7 +62,8 @@ 'sources': [ 'd8-readline.cc' ], }], ['(OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="netbsd" \ - or OS=="openbsd" or OS=="solaris" or OS=="android")', { + or OS=="openbsd" or OS=="solaris" or OS=="android" \ + or OS=="aix")', { 'sources': [ 'd8-posix.cc', ] }], [ 'OS=="win"', { diff --git a/src/deoptimizer.h b/src/deoptimizer.h index f67f986..2a7227a 100644 --- a/src/deoptimizer.h +++ b/src/deoptimizer.h @@ -697,7 +697,11 @@ class SlotRef BASE_EMBEDDED { return Handle(Memory::Object_at(addr_)); case INT32: { +#if defined(V8_TARGET_ARCH_PPC64) && __BYTE_ORDER == __BIG_ENDIAN + int value = Memory::int32_at(addr_ + kIntSize); +#else int value = Memory::int32_at(addr_); +#endif if (Smi::IsValid(value)) { return Handle(Smi::FromInt(value)); } else { @@ -706,7 +710,11 @@ class SlotRef BASE_EMBEDDED { } case UINT32: { +#if defined(V8_TARGET_ARCH_PPC64) && __BYTE_ORDER == __BIG_ENDIAN + uint32_t value = Memory::uint32_at(addr_ + kIntSize); +#else uint32_t value = Memory::uint32_at(addr_); +#endif if (value <= static_cast(Smi::kMaxValue)) { return Handle(Smi::FromInt(static_cast(value))); } else { diff --git a/src/disassembler.cc b/src/disassembler.cc index 9f8b9a8..9cd94f5 100644 --- a/src/disassembler.cc +++ b/src/disassembler.cc @@ -158,7 +158,11 @@ static int DecodeIt(FILE* f, "%08" V8PRIxPTR " jump table entry %4" V8PRIdPTR, ptr, ptr - begin); +#if V8_TARGET_ARCH_PPC64 + pc += 8; +#else pc += 4; +#endif } else { decode_buffer[0] = '\0'; pc += d.InstructionDecode(decode_buffer, pc); diff --git a/src/execution.h b/src/execution.h index 9f5d9ff..5bdc135 100644 --- a/src/execution.h +++ b/src/execution.h @@ -258,7 +258,7 @@ class StackGuard { void EnableInterrupts(); void DisableInterrupts(); -#ifdef V8_TARGET_ARCH_X64 +#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_PPC64) static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe); static const uintptr_t kIllegalLimit = V8_UINT64_C(0xfffffffffffffff8); #else diff --git a/src/flag-definitions.h b/src/flag-definitions.h index 4c7c090..764915e 100644 --- a/src/flag-definitions.h +++ b/src/flag-definitions.h @@ -442,6 +442,7 @@ DEFINE_bool(trace_parse, false, "trace parsing and preparsing") // simulator-arm.cc and simulator-mips.cc DEFINE_bool(trace_sim, false, "Trace simulator execution") +DEFINE_bool(trace_sim_stubs, false, "Trace simulator execution w/ stub markers") DEFINE_bool(check_icache, false, "Check icache flushes in ARM and MIPS simulator") DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions") @@ -449,6 +450,8 @@ DEFINE_int(sim_stack_alignment, 8, "Stack alingment in bytes in simulator (4 or 8, 8 is default)") // isolate.cc +DEFINE_bool(abort_on_uncaught_exception, false, + "abort program (dump core) when an uncaught exception is thrown") DEFINE_bool(trace_exception, false, "print stack trace when throwing exceptions") DEFINE_bool(preallocate_message_memory, false, diff --git a/src/frames-inl.h b/src/frames-inl.h index 27a526c..f6cc9e7 100644 --- a/src/frames-inl.h +++ b/src/frames-inl.h @@ -38,6 +38,8 @@ #include "x64/frames-x64.h" #elif V8_TARGET_ARCH_ARM #include "arm/frames-arm.h" +#elif V8_TARGET_ARCH_PPC +#include "ppc/frames-ppc.h" #elif V8_TARGET_ARCH_MIPS #include "mips/frames-mips.h" #else diff --git 
a/src/full-codegen.h b/src/full-codegen.h index 89b51f9..eff4b29 100644 --- a/src/full-codegen.h +++ b/src/full-codegen.h @@ -125,6 +125,8 @@ class FullCodeGenerator: public AstVisitor { static const int kBackEdgeDistanceUnit = 162; #elif V8_TARGET_ARCH_ARM static const int kBackEdgeDistanceUnit = 142; +#elif V8_TARGET_ARCH_PPC + static const int kBackEdgeDistanceUnit = 142; #elif V8_TARGET_ARCH_MIPS static const int kBackEdgeDistanceUnit = 142; #else @@ -333,12 +335,18 @@ class FullCodeGenerator: public AstVisitor { Label* if_true, Label* if_false, Label* fall_through); -#else // All non-mips arch. +#elif defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64) + void Split(Condition cc, + Label* if_true, + Label* if_false, + Label* fall_through, + CRegister cr = cr7); +#else // All other arch. void Split(Condition cc, Label* if_true, Label* if_false, Label* fall_through); -#endif // V8_TARGET_ARCH_MIPS +#endif // Load the value of a known (PARAMETER, LOCAL, or CONTEXT) variable into // a register. Emits a context chain walk if if necessary (so does diff --git a/src/globals.h b/src/globals.h index babffbf..8529daf 100644 --- a/src/globals.h +++ b/src/globals.h @@ -56,6 +56,11 @@ #define V8_INFINITY HUGE_VAL #endif +#ifdef _AIX +#undef V8_INFINITY +#define V8_INFINITY (__builtin_inff()) +#endif + #include "../include/v8stdint.h" @@ -86,6 +91,13 @@ namespace internal { #elif defined(__MIPSEL__) #define V8_HOST_ARCH_MIPS 1 #define V8_HOST_ARCH_32_BIT 1 +#elif defined(__PPC__) || defined(_ARCH_PPC) +#define V8_HOST_ARCH_PPC 1 +#if defined(__PPC64__) || defined(_ARCH_PPC64) +#define V8_HOST_ARCH_64_BIT 1 +#else +#define V8_HOST_ARCH_32_BIT 1 +#endif #else #error Host architecture was not detected as supported by v8 #endif @@ -94,7 +106,8 @@ namespace internal { // in the same way as the host architecture, that is, target the native // environment as presented by the compiler. 
#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_IA32) && \ - !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS) + !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS) && \ + !defined(V8_TARGET_ARCH_PPC) #if defined(_M_X64) || defined(__x86_64__) #define V8_TARGET_ARCH_X64 1 #elif defined(_M_IX86) || defined(__i386__) @@ -116,8 +129,9 @@ namespace internal { #error Target architecture x64 is only supported on x64 host #endif #if (defined(V8_TARGET_ARCH_ARM) && \ - !(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_ARM))) -#error Target architecture arm is only supported on arm and ia32 host + !(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_ARM) || \ + defined(V8_HOST_ARCH_PPC))) +#error Target architecture arm is only supported on arm, ppc and ia32 host #endif #if (defined(V8_TARGET_ARCH_MIPS) && \ !(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_MIPS))) @@ -131,6 +145,9 @@ namespace internal { #if (defined(V8_TARGET_ARCH_ARM) && !defined(V8_HOST_ARCH_ARM)) #define USE_SIMULATOR 1 #endif +#if (defined(V8_TARGET_ARCH_PPC) && !defined(V8_HOST_ARCH_PPC)) +#define USE_SIMULATOR 1 +#endif #if (defined(V8_TARGET_ARCH_MIPS) && !defined(V8_HOST_ARCH_MIPS)) #define USE_SIMULATOR 1 #endif @@ -190,6 +207,16 @@ typedef byte* Address; #define V8PRIdPTR V8_PTR_PREFIX "d" #define V8PRIuPTR V8_PTR_PREFIX "u" +// Fix for AIX define intptr_t as "long int": +#ifdef _AIX +#undef V8_PTR_PREFIX +#define V8_PTR_PREFIX "l" +#undef V8PRIdPTR +#define V8PRIdPTR "ld" +#undef V8PRIxPTR +#define V8PRIxPTR "lx" +#endif + // Fix for Mac OS X defining uintptr_t as "unsigned long": #if defined(__APPLE__) && defined(__MACH__) #undef V8PRIxPTR diff --git a/src/heap.cc b/src/heap.cc index e3fcb93..e5ca930 100644 --- a/src/heap.cc +++ b/src/heap.cc @@ -50,6 +50,10 @@ #include "v8threads.h" #include "v8utils.h" #include "vm-state-inl.h" +#if V8_TARGET_ARCH_PPC && !V8_INTERPRETED_REGEXP +#include "regexp-macro-assembler.h" +#include "ppc/regexp-macro-assembler-ppc.h" +#endif #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP #include "regexp-macro-assembler.h" #include "arm/regexp-macro-assembler-arm.h" diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc index 79550f3..408c1e6 100644 --- a/src/hydrogen-instructions.cc +++ b/src/hydrogen-instructions.cc @@ -36,6 +36,8 @@ #include "x64/lithium-x64.h" #elif V8_TARGET_ARCH_ARM #include "arm/lithium-arm.h" +#elif V8_TARGET_ARCH_PPC +#include "ppc/lithium-ppc.h" #elif V8_TARGET_ARCH_MIPS #include "mips/lithium-mips.h" #else @@ -966,8 +968,8 @@ HValue* HUnaryMathOperation::Canonicalize() { // introduced. if (value()->representation().IsInteger32()) return value(); -#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_IA32) || \ - defined(V8_TARGET_ARCH_X64) +#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_IA32) || \ + defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_PPC) if (value()->IsDiv() && (value()->UseCount() == 1)) { // TODO(2038): Implement this optimization for non ARM architectures. 
HDiv* hdiv = HDiv::cast(value()); diff --git a/src/hydrogen.cc b/src/hydrogen.cc index 8393e51..4000922 100644 --- a/src/hydrogen.cc +++ b/src/hydrogen.cc @@ -43,6 +43,8 @@ #include "x64/lithium-codegen-x64.h" #elif V8_TARGET_ARCH_ARM #include "arm/lithium-codegen-arm.h" +#elif V8_TARGET_ARCH_PPC +#include "ppc/lithium-codegen-ppc.h" #elif V8_TARGET_ARCH_MIPS #include "mips/lithium-codegen-mips.h" #else diff --git a/src/isolate.cc b/src/isolate.cc index 75e15a4..495d928 100644 --- a/src/isolate.cc +++ b/src/isolate.cc @@ -116,6 +116,8 @@ void ThreadLocalTop::Initialize() { #ifdef USE_SIMULATOR #ifdef V8_TARGET_ARCH_ARM simulator_ = Simulator::current(isolate_); +#elif V8_TARGET_ARCH_PPC + simulator_ = Simulator::current(isolate_); #elif V8_TARGET_ARCH_MIPS simulator_ = Simulator::current(isolate_); #endif @@ -1080,6 +1082,7 @@ bool Isolate::IsErrorObject(Handle obj) { return false; } +static int fatal_exception_depth = 0; void Isolate::DoThrow(Object* exception, MessageLocation* location) { ASSERT(!has_pending_exception()); @@ -1150,6 +1153,20 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) { thread_local_top()->pending_message_start_pos_ = location->start_pos(); thread_local_top()->pending_message_end_pos_ = location->end_pos(); } + + // If the abort-on-uncaught-exception flag is specified, abort on any + // exception not caught by JavaScript, even when an external handler is + // present. This flag is intended for use by JavaScript developers, so + // print a user-friendly stack trace (not an internal one). + if (fatal_exception_depth == 0 && + FLAG_abort_on_uncaught_exception && + (report_exception || can_be_caught_externally)) { + fatal_exception_depth++; + fprintf(stderr, "%s\n\nFROM\n", + *MessageHandler::GetLocalizedMessage(message_obj)); + PrintCurrentStackTrace(stderr); + OS::Abort(); + } } else if (location != NULL && !location->script().is_null()) { // We are bootstrapping and caught an error where the location is set // and we have a script for the location. 
@@ -1398,6 +1415,8 @@ char* Isolate::RestoreThread(char* from) { #ifdef USE_SIMULATOR #ifdef V8_TARGET_ARCH_ARM thread_local_top()->simulator_ = Simulator::current(this); +#elif V8_TARGET_ARCH_PPC + thread_local_top()->simulator_ = Simulator::current(this); #elif V8_TARGET_ARCH_MIPS thread_local_top()->simulator_ = Simulator::current(this); #endif @@ -1534,6 +1553,7 @@ Isolate::Isolate() thread_manager_->isolate_ = this; #if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \ + defined(V8_TARGET_ARCH_PPC) && !defined(__PPC__) || \ defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__) simulator_initialized_ = false; simulator_i_cache_ = NULL; @@ -1851,7 +1871,8 @@ bool Isolate::Init(Deserializer* des) { // Initialize other runtime facilities #if defined(USE_SIMULATOR) -#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) +#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) \ + || defined(V8_TARGET_ARCH_PPC) Simulator::Initialize(this); #endif #endif diff --git a/src/isolate.h b/src/isolate.h index b90191d..773fcaf 100644 --- a/src/isolate.h +++ b/src/isolate.h @@ -94,7 +94,9 @@ class Debugger; class DebuggerAgent; #endif -#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \ +#if defined(NATIVE_SIMULATION) || \ + !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \ + !defined(__PPC__) && defined(V8_TARGET_ARCH_PPC) || \ !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS) class Redirection; class Simulator; @@ -256,7 +258,8 @@ class ThreadLocalTop BASE_EMBEDDED { Address handler_; // try-blocks are chained through the stack #ifdef USE_SIMULATOR -#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) +#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_PPC) || \ + defined(V8_TARGET_ARCH_MIPS) Simulator* simulator_; #endif #endif // USE_SIMULATOR @@ -374,7 +377,9 @@ class Isolate { thread_id_(thread_id), stack_limit_(0), thread_state_(NULL), -#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \ +#if defined(NATIVE_SIMULATION) || \ + !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \ + !defined(__PPC__) && defined(V8_TARGET_ARCH_PPC) || \ !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS) simulator_(NULL), #endif @@ -387,7 +392,9 @@ class Isolate { ThreadState* thread_state() const { return thread_state_; } void set_thread_state(ThreadState* value) { thread_state_ = value; } -#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \ +#if defined(NATIVE_SIMULATION) || \ + !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \ + !defined(__PPC__) && defined(V8_TARGET_ARCH_PPC) || \ !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS) Simulator* simulator() const { return simulator_; } void set_simulator(Simulator* simulator) { @@ -405,7 +412,9 @@ class Isolate { uintptr_t stack_limit_; ThreadState* thread_state_; -#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \ +#if defined(NATIVE_SIMULATION) || \ + !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \ + !defined(__PPC__) && defined(V8_TARGET_ARCH_PPC) || \ !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS) Simulator* simulator_; #endif @@ -969,7 +978,9 @@ class Isolate { int* code_kind_statistics() { return code_kind_statistics_; } #endif -#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \ +#if defined(NATIVE_SIMULATION) || \ + defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \ + defined(V8_TARGET_ARCH_PPC) && !defined(__PPC__) || \ defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__) bool simulator_initialized() { return simulator_initialized_; } void set_simulator_initialized(bool 
initialized) { @@ -1249,7 +1260,9 @@ class Isolate { // Time stamp at initialization. double time_millis_at_init_; -#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \ +#if defined(NATIVE_SIMULATION) || \ + defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \ + defined(V8_TARGET_ARCH_PPC) && !defined(__PPC__) || \ defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__) bool simulator_initialized_; HashMap* simulator_i_cache_; diff --git a/src/jsregexp.cc b/src/jsregexp.cc index e59170d..47caf4e 100644 --- a/src/jsregexp.cc +++ b/src/jsregexp.cc @@ -50,6 +50,8 @@ #include "x64/regexp-macro-assembler-x64.h" #elif V8_TARGET_ARCH_ARM #include "arm/regexp-macro-assembler-arm.h" +#elif V8_TARGET_ARCH_PPC +#include "ppc/regexp-macro-assembler-ppc.h" #elif V8_TARGET_ARCH_MIPS #include "mips/regexp-macro-assembler-mips.h" #else @@ -6131,6 +6133,9 @@ RegExpEngine::CompilationResult RegExpEngine::Compile( #elif V8_TARGET_ARCH_ARM RegExpMacroAssemblerARM macro_assembler(mode, (data->capture_count + 1) * 2, zone); +#elif V8_TARGET_ARCH_PPC + RegExpMacroAssemblerPPC macro_assembler(mode, (data->capture_count + 1) * 2, + zone); #elif V8_TARGET_ARCH_MIPS RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2, zone); diff --git a/src/jsregexp.h b/src/jsregexp.h index 96825ce..f69a458 100644 --- a/src/jsregexp.h +++ b/src/jsregexp.h @@ -1352,6 +1352,13 @@ class BoyerMooreLookahead : public ZoneObject { // to match foo is generated only once (the traces have a common prefix). The // code to store the capture is deferred and generated (twice) after the places // where baz has been matched. + +#ifdef _AIX +#undef UNKNOWN +#undef FALSE +#undef TRUE +#endif + class Trace { public: // A value for a property that is either known to be true, know to be false, diff --git a/src/lithium-allocator-inl.h b/src/lithium-allocator-inl.h index 8f660ce..d47e66e 100644 --- a/src/lithium-allocator-inl.h +++ b/src/lithium-allocator-inl.h @@ -36,6 +36,8 @@ #include "x64/lithium-x64.h" #elif V8_TARGET_ARCH_ARM #include "arm/lithium-arm.h" +#elif V8_TARGET_ARCH_PPC +#include "ppc/lithium-ppc.h" #elif V8_TARGET_ARCH_MIPS #include "mips/lithium-mips.h" #else diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc index 91a9811..07ebb75 100644 --- a/src/lithium-allocator.cc +++ b/src/lithium-allocator.cc @@ -37,6 +37,8 @@ #include "x64/lithium-x64.h" #elif V8_TARGET_ARCH_ARM #include "arm/lithium-arm.h" +#elif V8_TARGET_ARCH_PPC +#include "ppc/lithium-ppc.h" #elif V8_TARGET_ARCH_MIPS #include "mips/lithium-mips.h" #else diff --git a/src/lithium.cc b/src/lithium.cc index eb2198d..e7596fa 100644 --- a/src/lithium.cc +++ b/src/lithium.cc @@ -38,6 +38,9 @@ #elif V8_TARGET_ARCH_ARM #include "arm/lithium-arm.h" #include "arm/lithium-codegen-arm.h" +#elif V8_TARGET_ARCH_PPC +#include "ppc/lithium-ppc.h" +#include "ppc/lithium-codegen-ppc.h" #elif V8_TARGET_ARCH_MIPS #include "mips/lithium-mips.h" #include "mips/lithium-codegen-mips.h" diff --git a/src/log.cc b/src/log.cc index b049ffe..2f42ae1 100644 --- a/src/log.cc +++ b/src/log.cc @@ -1526,6 +1526,8 @@ void Logger::LogCodeInfo() { const char arch[] = "x64"; #elif V8_TARGET_ARCH_ARM const char arch[] = "arm"; +#elif V8_TARGET_ARCH_PPC + const char arch[] = "ppc"; #elif V8_TARGET_ARCH_MIPS const char arch[] = "mips"; #else diff --git a/src/macro-assembler.h b/src/macro-assembler.h index 11e2217..f01a5ce 100644 --- a/src/macro-assembler.h +++ b/src/macro-assembler.h @@ -58,6 +58,13 @@ const int kInvalidProtoDepth = -1; #include "arm/assembler-arm-inl.h" 
#include "code.h" // must be after assembler_*.h #include "arm/macro-assembler-arm.h" +#elif V8_TARGET_ARCH_PPC +#include "ppc/constants-ppc.h" +#include "assembler.h" +#include "ppc/assembler-ppc.h" +#include "ppc/assembler-ppc-inl.h" +#include "code.h" // must be after assembler_*.h +#include "ppc/macro-assembler-ppc.h" #elif V8_TARGET_ARCH_MIPS #include "mips/constants-mips.h" #include "assembler.h" diff --git a/src/objects-inl.h b/src/objects-inl.h index ea5a93f..958d815 100644 --- a/src/objects-inl.h +++ b/src/objects-inl.h @@ -1061,7 +1061,7 @@ bool Smi::IsValid(intptr_t value) { bool in_range = (value >= kMinValue) && (value <= kMaxValue); #endif -#ifdef V8_TARGET_ARCH_X64 +#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_PPC64) // To be representable as a long smi, the value must be a 32-bit integer. bool result = (value == static_cast(value)); #else @@ -2409,10 +2409,7 @@ uint32_t String::hash_field() { void String::set_hash_field(uint32_t value) { - WRITE_UINT32_FIELD(this, kHashFieldOffset, value); -#if V8_HOST_ARCH_64_BIT - WRITE_UINT32_FIELD(this, kHashFieldOffset + kIntSize, 0); -#endif + WRITE_INTPTR_FIELD(this, kHashFieldSlot, value); } @@ -3976,25 +3973,33 @@ SMI_ACCESSORS(SharedFunctionInfo, kStressDeoptCounterOffset) #else -#define PSEUDO_SMI_ACCESSORS_LO(holder, name, offset) \ - STATIC_ASSERT(holder::offset % kPointerSize == 0); \ - int holder::name() { \ - int value = READ_INT_FIELD(this, offset); \ - ASSERT(kHeapObjectTag == 1); \ - ASSERT((value & kHeapObjectTag) == 0); \ - return value >> 1; \ - } \ - void holder::set_##name(int value) { \ - ASSERT(kHeapObjectTag == 1); \ - ASSERT((value & 0xC0000000) == 0xC0000000 || \ - (value & 0xC0000000) == 0x000000000); \ - WRITE_INT_FIELD(this, \ - offset, \ - (value << 1) & ~kHeapObjectTag); \ +#if __BYTE_ORDER == __LITTLE_ENDIAN +#define PSEUDO_SMI_LO_ALIGN 0 +#define PSEUDO_SMI_HI_ALIGN kIntSize +#else +#define PSEUDO_SMI_LO_ALIGN kIntSize +#define PSEUDO_SMI_HI_ALIGN 0 +#endif + +#define PSEUDO_SMI_ACCESSORS_LO(holder, name, offset) \ + STATIC_ASSERT(holder::offset % kPointerSize == PSEUDO_SMI_LO_ALIGN); \ + int holder::name() { \ + int value = READ_INT_FIELD(this, offset); \ + ASSERT(kHeapObjectTag == 1); \ + ASSERT((value & kHeapObjectTag) == 0); \ + return value >> 1; \ + } \ + void holder::set_##name(int value) { \ + ASSERT(kHeapObjectTag == 1); \ + ASSERT((value & 0xC0000000) == 0xC0000000 || \ + (value & 0xC0000000) == 0x000000000); \ + WRITE_INT_FIELD(this, \ + offset, \ + (value << 1) & ~kHeapObjectTag); \ } -#define PSEUDO_SMI_ACCESSORS_HI(holder, name, offset) \ - STATIC_ASSERT(holder::offset % kPointerSize == kIntSize); \ +#define PSEUDO_SMI_ACCESSORS_HI(holder, name, offset) \ + STATIC_ASSERT(holder::offset % kPointerSize == PSEUDO_SMI_HI_ALIGN); \ INT_ACCESSORS(holder, name, offset) diff --git a/src/objects.cc b/src/objects.cc index 37f8361..6f93592 100644 --- a/src/objects.cc +++ b/src/objects.cc @@ -7007,8 +7007,8 @@ static inline bool CompareRawStringContents(Vector a, Vector b) { // then we have to check that the strings are aligned before // comparing them blockwise. 
const int kAlignmentMask = sizeof(uint32_t) - 1; // NOLINT - uint32_t pa_addr = reinterpret_cast(pa); - uint32_t pb_addr = reinterpret_cast(pb); + uintptr_t pa_addr = reinterpret_cast(pa); + uintptr_t pb_addr = reinterpret_cast(pb); if (((pa_addr & kAlignmentMask) | (pb_addr & kAlignmentMask)) == 0) { #endif const int kStepSize = sizeof(int) / sizeof(Char); // NOLINT diff --git a/src/objects.h b/src/objects.h index 755dd42..c84c440 100644 --- a/src/objects.h +++ b/src/objects.h @@ -37,6 +37,8 @@ #include "unicode-inl.h" #if V8_TARGET_ARCH_ARM #include "arm/constants-arm.h" +#elif V8_TARGET_ARCH_PPC +#include "ppc/constants-ppc.h" #elif V8_TARGET_ARCH_MIPS #include "mips/constants-mips.h" #endif @@ -1344,8 +1346,13 @@ class HeapNumber: public HeapObject { // is a mixture of sign, exponent and mantissa. Our current platforms are all // little endian apart from non-EABI arm which is little endian with big // endian floating point word ordering! +#if __BYTE_ORDER == __LITTLE_ENDIAN static const int kMantissaOffset = kValueOffset; static const int kExponentOffset = kValueOffset + 4; +#elif __BYTE_ORDER == __BIG_ENDIAN + static const int kMantissaOffset = kValueOffset + 4; + static const int kExponentOffset = kValueOffset; +#endif static const int kSize = kValueOffset + kDoubleSize; static const uint32_t kSignMask = 0x80000000u; @@ -5844,10 +5851,11 @@ class SharedFunctionInfo: public HeapObject { // garbage collections. // To avoid wasting space on 64-bit architectures we use // the following trick: we group integer fields into pairs - // First integer in each pair is shifted left by 1. + // The least significant integer in each pair is shifted left by 1. // By doing this we guarantee that LSB of each kPointerSize aligned // word is not set and thus this word cannot be treated as pointer // to HeapObject during old space traversal. +#if __BYTE_ORDER == __LITTLE_ENDIAN static const int kLengthOffset = kAstNodeCountOffset + kPointerSize; static const int kFormalParameterCountOffset = @@ -5875,6 +5883,38 @@ class SharedFunctionInfo: public HeapObject { static const int kCountersOffset = kOptCountOffset + kIntSize; static const int kStressDeoptCounterOffset = kCountersOffset + kIntSize; +#elif __BYTE_ORDER == __BIG_ENDIAN + static const int kFormalParameterCountOffset = + kAstNodeCountOffset + kPointerSize; + static const int kLengthOffset = + kFormalParameterCountOffset + kIntSize; + + static const int kNumLiteralsOffset = + kLengthOffset + kIntSize; + static const int kExpectedNofPropertiesOffset = + kNumLiteralsOffset + kIntSize; + + static const int kStartPositionAndTypeOffset = + kExpectedNofPropertiesOffset + kIntSize; + static const int kEndPositionOffset = + kStartPositionAndTypeOffset + kIntSize; + + static const int kCompilerHintsOffset = + kEndPositionOffset + kIntSize; + static const int kFunctionTokenPositionOffset = + kCompilerHintsOffset + kIntSize; + + static const int kOptCountOffset = + kFunctionTokenPositionOffset + kIntSize; + static const int kThisPropertyAssignmentsCountOffset = + kOptCountOffset + kIntSize; + + static const int kStressDeoptCounterOffset = + kThisPropertyAssignmentsCountOffset + kIntSize; + static const int kCountersOffset = kStressDeoptCounterOffset + kIntSize; +#else +#error Unknown byte ordering +#endif // Total size. static const int kSize = kStressDeoptCounterOffset + kIntSize; @@ -7314,8 +7354,13 @@ class String: public HeapObject { // Layout description. 
static const int kLengthOffset = HeapObject::kHeaderSize; - static const int kHashFieldOffset = kLengthOffset + kPointerSize; - static const int kSize = kHashFieldOffset + kPointerSize; + static const int kHashFieldSlot = kLengthOffset + kPointerSize; +#if __BYTE_ORDER == __LITTLE_ENDIAN || !V8_HOST_ARCH_64_BIT + static const int kHashFieldOffset = kHashFieldSlot; +#else + static const int kHashFieldOffset = kHashFieldSlot + kIntSize; +#endif + static const int kSize = kHashFieldSlot + kPointerSize; // Maximum number of characters to consider when trying to convert a string // value into an array index. diff --git a/src/platform-aix.cc b/src/platform-aix.cc new file mode 100644 index 0000000..c739695 --- /dev/null +++ b/src/platform-aix.cc @@ -0,0 +1,894 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Platform specific code for AIX goes here. For the POSIX comaptible parts +// the implementation is in platform-posix.cc. + +#include +#include +#include +#include +#include +#include +#include +#include + +#include // mmap & munmap +#include // mmap & munmap +#include // open +#include // open +#include // getpagesize +#include // index +#include +#include +#include + +#undef MAP_TYPE + +#include "v8.h" +#include "v8threads.h" + +#include "platform-posix.h" +#include "platform.h" +#include "vm-state-inl.h" + + +namespace v8 { +namespace internal { + +// 0 is never a valid thread id on AIX since tids and pids share a +// name space and pid 0 is used to kill the group (see man 2 kill). +static const pthread_t kNoThread = (pthread_t) 0; + + +double ceiling(double x) { + // Correct as on OS X + if (-1.0 < x && x < 0.0) { + return -0.0; + } else { + return ceil(x); + } +} + + +static Mutex* limit_mutex = NULL; + + +void OS::PostSetUp() { + POSIXPostSetUp(); +} + + +void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) { + __asm__ __volatile__("" : : : "memory"); + *ptr = value; +} + + +uint64_t OS::CpuFeaturesImpliedByPlatform() { + return 0; // AIX runs on anything. 
+} + + +int OS::ActivationFrameAlignment() { + // 8 byte alignment on AIX + return 8; +} + + +const char* OS::LocalTimezone(double time) { + if (isnan(time)) return ""; + time_t tv = static_cast(floor(time/msPerSecond)); + struct tm* t = localtime(&tv); + if (NULL == t) return ""; + return tzname[0]; // The location of the timezone string on AIX. +} + + +double OS::LocalTimeOffset() { + // On AIX, struct tm does not contain a tm_gmtoff field. + time_t utc = time(NULL); + ASSERT(utc != -1); + struct tm* loc = localtime(&utc); + ASSERT(loc != NULL); + return static_cast((mktime(loc) - utc) * msPerSecond); +} + + +// We keep the lowest and highest addresses mapped as a quick way of +// determining that pointers are outside the heap (used mostly in assertions +// and verification). The estimate is conservative, i.e., not all addresses in +// 'allocated' space are actually allocated to our heap. The range is +// [lowest, highest), inclusive on the low and and exclusive on the high end. +static void* lowest_ever_allocated = reinterpret_cast(-1); +static void* highest_ever_allocated = reinterpret_cast(0); + + +static void UpdateAllocatedSpaceLimits(void* address, int size) { + ASSERT(limit_mutex != NULL); + ScopedLock lock(limit_mutex); + + lowest_ever_allocated = Min(lowest_ever_allocated, address); + highest_ever_allocated = + Max(highest_ever_allocated, + reinterpret_cast(reinterpret_cast(address) + size)); +} + + +bool OS::IsOutsideAllocatedSpace(void* address) { + return address < lowest_ever_allocated || address >= highest_ever_allocated; +} + + +size_t OS::AllocateAlignment() { + return getpagesize(); +} + + +void* OS::Allocate(const size_t requested, + size_t* allocated, + bool executable) { + const size_t msize = RoundUp(requested, getpagesize()); + int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0); + void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + + if (mbase == MAP_FAILED) { + LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed")); + return NULL; + } + *allocated = msize; + UpdateAllocatedSpaceLimits(mbase, msize); + return mbase; +} + + +void OS::Free(void* buf, const size_t length) { + // TODO(1240712): munmap has a return value which is ignored here. + int result = munmap(buf, length); + USE(result); + ASSERT(result == 0); +} + + +void OS::Sleep(int milliseconds) { + unsigned int ms = static_cast(milliseconds); + usleep(1000 * ms); +} + + +void OS::Abort() { + // Redirect to std abort to signal abnormal program termination. 
+ abort(); +} + + +void OS::DebugBreak() { +#if (defined(__arm__) || defined(__thumb__)) +# if defined(CAN_USE_ARMV5_INSTRUCTIONS) + asm("bkpt 0"); +# endif +#elif defined(_ARCH_PPC) + asm("trap"); +#else + asm("int $3"); +#endif +} + + +class PosixMemoryMappedFile : public OS::MemoryMappedFile { + public: + PosixMemoryMappedFile(FILE* file, void* memory, int size) + : file_(file), memory_(memory), size_(size) { } + virtual ~PosixMemoryMappedFile(); + virtual void* memory() { return memory_; } + virtual int size() { return size_; } + private: + FILE* file_; + void* memory_; + int size_; +}; + + +OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { + FILE* file = fopen(name, "r+"); + if (file == NULL) return NULL; + + fseek(file, 0, SEEK_END); + int size = ftell(file); + + void* memory = + mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); + return new PosixMemoryMappedFile(file, memory, size); +} + + +OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, + void* initial) { + FILE* file = fopen(name, "w+"); + if (file == NULL) return NULL; + int result = fwrite(initial, size, 1, file); + if (result < 1) { + fclose(file); + return NULL; + } + void* memory = + mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); + return new PosixMemoryMappedFile(file, memory, size); +} + + +PosixMemoryMappedFile::~PosixMemoryMappedFile() { + if (memory_) munmap(memory_, size_); + fclose(file_); +} + + +static unsigned StringToLong(char* buffer) { + return static_cast(strtol(buffer, NULL, 16)); // NOLINT +} + + +void OS::LogSharedLibraryAddresses() { + static const int MAP_LENGTH = 1024; + int fd = open("/proc/self/maps", O_RDONLY); + if (fd < 0) return; + while (true) { + char addr_buffer[11]; + addr_buffer[0] = '0'; + addr_buffer[1] = 'x'; + addr_buffer[10] = 0; + int result = read(fd, addr_buffer + 2, 8); + if (result < 8) break; + unsigned start = StringToLong(addr_buffer); + result = read(fd, addr_buffer + 2, 1); + if (result < 1) break; + if (addr_buffer[2] != '-') break; + result = read(fd, addr_buffer + 2, 8); + if (result < 8) break; + unsigned end = StringToLong(addr_buffer); + char buffer[MAP_LENGTH]; + int bytes_read = -1; + do { + bytes_read++; + if (bytes_read >= MAP_LENGTH - 1) + break; + result = read(fd, buffer + bytes_read, 1); + if (result < 1) break; + } while (buffer[bytes_read] != '\n'); + buffer[bytes_read] = 0; + // Ignore mappings that are not executable. + if (buffer[3] != 'x') continue; + char* start_of_path = index(buffer, '/'); + // There may be no filename in this line. Skip to next. + if (start_of_path == NULL) continue; + buffer[bytes_read] = 0; + LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end)); + } + close(fd); +} + + +void OS::SignalCodeMovingGC() { +} + + +int OS::StackWalk(Vector frames) { + return 0; +} + + +// Constants used for mmap. 
+static const int kMmapFd = -1; +static const int kMmapFdOffset = 0; + +VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } + +VirtualMemory::VirtualMemory(size_t size) { + address_ = ReserveRegion(size); + size_ = size; +} + + +VirtualMemory::VirtualMemory(size_t size, size_t alignment) + : address_(NULL), size_(0) { + ASSERT(IsAligned(alignment, static_cast(OS::AllocateAlignment()))); + size_t request_size = RoundUp(size + alignment, + static_cast(OS::AllocateAlignment())); + void* reservation = mmap(OS::GetRandomMmapAddr(), + request_size, + PROT_NONE, + MAP_PRIVATE | MAP_ANONYMOUS, + kMmapFd, + kMmapFdOffset); + if (reservation == MAP_FAILED) return; + + Address base = static_cast
(reservation); + Address aligned_base = RoundUp(base, alignment); + ASSERT_LE(base, aligned_base); + + // Unmap extra memory reserved before and after the desired block. + if (aligned_base != base) { + size_t prefix_size = static_cast(aligned_base - base); + OS::Free(base, prefix_size); + request_size -= prefix_size; + } + + size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); + ASSERT_LE(aligned_size, request_size); + + if (aligned_size != request_size) { + size_t suffix_size = request_size - aligned_size; + OS::Free(aligned_base + aligned_size, suffix_size); + request_size -= suffix_size; + } + + ASSERT(aligned_size == request_size); + + address_ = static_cast(aligned_base); + size_ = aligned_size; +} + + +VirtualMemory::~VirtualMemory() { + if (IsReserved()) { + bool result = ReleaseRegion(address(), size()); + ASSERT(result); + USE(result); + } +} + + +bool VirtualMemory::IsReserved() { + return address_ != NULL; +} + + +void VirtualMemory::Reset() { + address_ = NULL; + size_ = 0; +} + + +bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { + return CommitRegion(address, size, is_executable); +} + + +bool VirtualMemory::Uncommit(void* address, size_t size) { + return UncommitRegion(address, size); +} + + +bool VirtualMemory::Guard(void* address) { + OS::Guard(address, OS::CommitPageSize()); + return true; +} + + +void* VirtualMemory::ReserveRegion(size_t size) { + void* result = mmap(OS::GetRandomMmapAddr(), + size, + PROT_NONE, + MAP_PRIVATE | MAP_ANONYMOUS, + kMmapFd, + kMmapFdOffset); + + if (result == MAP_FAILED) return NULL; + + return result; +} + + +bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { + int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); + if (mprotect(base, size, prot) == -1) return false; + + UpdateAllocatedSpaceLimits(base, size); + return true; +} + + +bool VirtualMemory::UncommitRegion(void* base, size_t size) { + return mprotect(base, size, PROT_NONE) != -1; +} + + +bool VirtualMemory::ReleaseRegion(void* base, size_t size) { + return munmap(base, size) == 0; +} + + +class Thread::PlatformData : public Malloced { + public: + pthread_t thread_; // Thread handle for pthread. +}; + + +Thread::Thread(const Options& options) + : data_(new PlatformData), + stack_size_(options.stack_size()) { + set_name(options.name()); +} + + +Thread::~Thread() { + delete data_; +} + + +static void* ThreadEntry(void* arg) { + Thread* thread = reinterpret_cast(arg); + // This is also initialized by the first argument to pthread_create() but we + // don't know which thread will run first (the original thread or the new + // one) so we initialize it here too. 
+ thread->data()->thread_ = pthread_self(); + ASSERT(thread->data()->thread_ != kNoThread); + thread->Run(); + return NULL; +} + + +void Thread::set_name(const char* name) { + strncpy(name_, name, sizeof(name_)); + name_[sizeof(name_) - 1] = '\0'; +} + + +void Thread::Start() { + pthread_attr_t attr; + size_t stack_size = stack_size_; + + if (stack_size == 0) { + // Default is 96KB -- bump up to 2MB + stack_size = 2 * MB; + } + pthread_attr_init(&attr); + pthread_attr_setstacksize(&attr, static_cast(stack_size)); + pthread_create(&data_->thread_, &attr, ThreadEntry, this); + ASSERT(data_->thread_ != kNoThread); +} + + +void Thread::Join() { + pthread_join(data_->thread_, NULL); +} + + +Thread::LocalStorageKey Thread::CreateThreadLocalKey() { + pthread_key_t key; + int result = pthread_key_create(&key, NULL); + USE(result); + ASSERT(result == 0); + return static_cast(key); +} + + +void Thread::DeleteThreadLocalKey(LocalStorageKey key) { + pthread_key_t pthread_key = static_cast(key); + int result = pthread_key_delete(pthread_key); + USE(result); + ASSERT(result == 0); +} + + +void* Thread::GetThreadLocal(LocalStorageKey key) { + pthread_key_t pthread_key = static_cast(key); + return pthread_getspecific(pthread_key); +} + + +void Thread::SetThreadLocal(LocalStorageKey key, void* value) { + pthread_key_t pthread_key = static_cast(key); + pthread_setspecific(pthread_key, value); +} + + +void Thread::YieldCPU() { + sched_yield(); +} + + +class AIXMutex : public Mutex { + public: + AIXMutex() { + pthread_mutexattr_t attrs; + int result = pthread_mutexattr_init(&attrs); + ASSERT(result == 0); + result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE); + ASSERT(result == 0); + result = pthread_mutex_init(&mutex_, &attrs); + ASSERT(result == 0); + USE(result); + } + + virtual ~AIXMutex() { pthread_mutex_destroy(&mutex_); } + + virtual int Lock() { + int result = pthread_mutex_lock(&mutex_); + return result; + } + + virtual int Unlock() { + int result = pthread_mutex_unlock(&mutex_); + return result; + } + + virtual bool TryLock() { + int result = pthread_mutex_trylock(&mutex_); + // Return false if the lock is busy and locking failed. + if (result == EBUSY) { + return false; + } + ASSERT(result == 0); // Verify no other errors. + return true; + } + + private: + pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms. +}; + + +Mutex* OS::CreateMutex() { + return new AIXMutex(); +} + + +class AIXSemaphore : public Semaphore { + public: + explicit AIXSemaphore(int count) { sem_init(&sem_, 0, count); } + virtual ~AIXSemaphore() { sem_destroy(&sem_); } + + virtual void Wait(); + virtual bool Wait(int timeout); + virtual void Signal() { sem_post(&sem_); } + private: + sem_t sem_; +}; + + +void AIXSemaphore::Wait() { + while (true) { + int result = sem_wait(&sem_); + if (result == 0) return; // Successfully got semaphore. + CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup. 
+ } +} + + +#ifndef TIMEVAL_TO_TIMESPEC +#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \ + (ts)->tv_sec = (tv)->tv_sec; \ + (ts)->tv_nsec = (tv)->tv_usec * 1000; \ +} while (false) +#endif + + +#ifndef timeradd +#define timeradd(a, b, result) \ + do { \ + (result)->tv_sec = (a)->tv_sec + (b)->tv_sec; \ + (result)->tv_usec = (a)->tv_usec + (b)->tv_usec; \ + if ((result)->tv_usec >= 1000000) { \ + ++(result)->tv_sec; \ + (result)->tv_usec -= 1000000; \ + } \ + } while (0) +#endif + + +bool AIXSemaphore::Wait(int timeout) { + const long kOneSecondMicros = 1000000; // NOLINT + + // Split timeout into second and nanosecond parts. + struct timeval delta; + delta.tv_usec = timeout % kOneSecondMicros; + delta.tv_sec = timeout / kOneSecondMicros; + + struct timeval current_time; + // Get the current time. + if (gettimeofday(¤t_time, NULL) == -1) { + return false; + } + + // Calculate time for end of timeout. + struct timeval end_time; + timeradd(¤t_time, &delta, &end_time); + + struct timespec ts; + TIMEVAL_TO_TIMESPEC(&end_time, &ts); + while (true) { + int result = sem_timedwait(&sem_, &ts); + if (result == 0) return true; // Successfully got semaphore. + if (result == -1 && errno == ETIMEDOUT) return false; // Timeout. + CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup. + } +} + + +Semaphore* OS::CreateSemaphore(int count) { + return new AIXSemaphore(count); +} + + +static pthread_t GetThreadID() { + pthread_t thread_id = pthread_self(); + return thread_id; +} + + +class Sampler::PlatformData : public Malloced { + public: + PlatformData() : vm_tid_(GetThreadID()) {} + + pthread_t vm_tid() const { return vm_tid_; } + + private: + pthread_t vm_tid_; +}; + + +static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) { + USE(info); + if (signal != SIGPROF) return; + Isolate* isolate = Isolate::UncheckedCurrent(); + if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) { + // We require a fully initialized and entered isolate. + return; + } + if (v8::Locker::IsActive() && + !isolate->thread_manager()->IsLockedByCurrentThread()) { + return; + } + + Sampler* sampler = isolate->logger()->sampler(); + if (sampler == NULL || !sampler->IsActive()) return; + + TickSample sample_obj; + TickSample* sample = CpuProfiler::TickSampleEvent(isolate); + if (sample == NULL) sample = &sample_obj; + + // Extracting the sample from the context is extremely machine dependent. + ucontext_t* ucontext = reinterpret_cast(context); + mcontext_t& mcontext = ucontext->uc_mcontext; + sample->state = isolate->current_vm_state(); +#if V8_HOST_ARCH_IA32 + sample->pc = reinterpret_cast
<Address>(mcontext.mc_eip);
+ sample->sp = reinterpret_cast<Address>(mcontext.mc_esp);
+ sample->fp = reinterpret_cast<Address>(mcontext.mc_ebp);
+#elif V8_HOST_ARCH_X64
+ sample->pc = reinterpret_cast<Address>(mcontext.mc_rip);
+ sample->sp = reinterpret_cast<Address>(mcontext.mc_rsp);
+ sample->fp = reinterpret_cast<Address>(mcontext.mc_rbp);
+#elif V8_HOST_ARCH_ARM
+ sample->pc = reinterpret_cast<Address>(mcontext.mc_r15);
+ sample->sp = reinterpret_cast<Address>(mcontext.mc_r13);
+ sample->fp = reinterpret_cast<Address>(mcontext.mc_r11);
+#elif V8_HOST_ARCH_PPC
+ sample->pc = reinterpret_cast<Address>(mcontext.jmp_context.iar);
+ sample->sp = reinterpret_cast<Address>(mcontext.jmp_context.gpr[1]);
+ sample->fp = reinterpret_cast<Address>
(mcontext.jmp_context.gpr[1]); +#endif + sampler->SampleStack(sample); + sampler->Tick(sample); +} + + +class SignalSender : public Thread { + public: + enum SleepInterval { + HALF_INTERVAL, + FULL_INTERVAL + }; + + static const int kSignalSenderStackSize = 64 * KB; + + explicit SignalSender(int interval) + : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)), + interval_(interval) {} + + static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); } + static void TearDown() { delete mutex_; } + + static void AddActiveSampler(Sampler* sampler) { + ScopedLock lock(mutex_); + SamplerRegistry::AddActiveSampler(sampler); + if (instance_ == NULL) { + // Install a signal handler. + struct sigaction sa; + sa.sa_sigaction = ProfilerSignalHandler; + sigemptyset(&sa.sa_mask); + sa.sa_flags = SA_RESTART | SA_SIGINFO; + signal_handler_installed_ = + (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0); + + // Start a thread that sends SIGPROF signal to VM threads. + instance_ = new SignalSender(sampler->interval()); + instance_->Start(); + } else { + ASSERT(instance_->interval_ == sampler->interval()); + } + } + + static void RemoveActiveSampler(Sampler* sampler) { + ScopedLock lock(mutex_); + SamplerRegistry::RemoveActiveSampler(sampler); + if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) { + RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_); + delete instance_; + instance_ = NULL; + + // Restore the old signal handler. + if (signal_handler_installed_) { + sigaction(SIGPROF, &old_signal_handler_, 0); + signal_handler_installed_ = false; + } + } + } + + // Implement Thread::Run(). + virtual void Run() { + SamplerRegistry::State state; + while ((state = SamplerRegistry::GetState()) != + SamplerRegistry::HAS_NO_SAMPLERS) { + bool cpu_profiling_enabled = + (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS); + bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled(); + // When CPU profiling is enabled both JavaScript and C++ code is + // profiled. We must not suspend. + if (!cpu_profiling_enabled) { + if (rate_limiter_.SuspendIfNecessary()) continue; + } + if (cpu_profiling_enabled && runtime_profiler_enabled) { + if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) { + return; + } + Sleep(HALF_INTERVAL); + if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) { + return; + } + Sleep(HALF_INTERVAL); + } else { + if (cpu_profiling_enabled) { + if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, + this)) { + return; + } + } + if (runtime_profiler_enabled) { + if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, + NULL)) { + return; + } + } + Sleep(FULL_INTERVAL); + } + } + } + + static void DoCpuProfile(Sampler* sampler, void* raw_sender) { + if (!sampler->IsProfiling()) return; + SignalSender* sender = reinterpret_cast(raw_sender); + sender->SendProfilingSignal(sampler->platform_data()->vm_tid()); + } + + static void DoRuntimeProfile(Sampler* sampler, void* ignored) { + if (!sampler->isolate()->IsInitialized()) return; + sampler->isolate()->runtime_profiler()->NotifyTick(); + } + + void SendProfilingSignal(pthread_t tid) { + if (!signal_handler_installed_) return; + pthread_kill(tid, SIGPROF); + } + + void Sleep(SleepInterval full_or_half) { + // Convert ms to us and subtract 100 us to compensate delays + // occuring during signal delivery. 
+ useconds_t interval = interval_ * 1000 - 100; + if (full_or_half == HALF_INTERVAL) interval /= 2; + int result = usleep(interval); +#ifdef DEBUG + if (result != 0 && errno != EINTR) { + fprintf(stderr, + "SignalSender usleep error; interval = %u, errno = %d\n", + interval, + errno); + ASSERT(result == 0 || errno == EINTR); + } +#endif + USE(result); + } + + const int interval_; + RuntimeProfilerRateLimiter rate_limiter_; + + // Protects the process wide state below. + static Mutex* mutex_; + static SignalSender* instance_; + static bool signal_handler_installed_; + static struct sigaction old_signal_handler_; + + private: + DISALLOW_COPY_AND_ASSIGN(SignalSender); +}; + +Mutex* SignalSender::mutex_ = NULL; +SignalSender* SignalSender::instance_ = NULL; +struct sigaction SignalSender::old_signal_handler_; +bool SignalSender::signal_handler_installed_ = false; + + +void OS::SetUp() { + // Seed the random number generator. + // Convert the current time to a 64-bit integer first, before converting it + // to an unsigned. Going directly can cause an overflow and the seed to be + // set to all ones. The seed will be identical for different instances that + // call this setup code within the same millisecond. + uint64_t seed = static_cast(TimeCurrentMillis()); + srandom(static_cast(seed)); + limit_mutex = CreateMutex(); + SignalSender::SetUp(); +} + + +void OS::TearDown() { + SignalSender::TearDown(); + delete limit_mutex; +} + + +Sampler::Sampler(Isolate* isolate, int interval) + : isolate_(isolate), + interval_(interval), + profiling_(false), + active_(false), + samples_taken_(0) { + data_ = new PlatformData; +} + + +Sampler::~Sampler() { + ASSERT(!IsActive()); + delete data_; +} + + +void Sampler::Start() { + ASSERT(!IsActive()); + SetActive(true); + SignalSender::AddActiveSampler(this); +} + + +void Sampler::Stop() { + ASSERT(IsActive()); + SignalSender::RemoveActiveSampler(this); + SetActive(false); +} + + +} } // namespace v8::internal diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc index 511759c..f8d04f5 100644 --- a/src/platform-freebsd.cc +++ b/src/platform-freebsd.cc @@ -698,6 +698,10 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) { sample->pc = reinterpret_cast
<Address>(mcontext.mc_r15);
 sample->sp = reinterpret_cast<Address>(mcontext.mc_r13);
 sample->fp = reinterpret_cast<Address>(mcontext.mc_r11);
+#elif V8_HOST_ARCH_PPC
+ sample->pc = reinterpret_cast<Address>(mcontext.mc_r15);
+ sample->sp = reinterpret_cast<Address>(mcontext.mc_r13);
+ sample->fp = reinterpret_cast<Address>
(mcontext.mc_r11); #endif sampler->SampleStack(sample); sampler->Tick(sample); diff --git a/src/platform-linux.cc b/src/platform-linux.cc index beb2cce..31cd279 100644 --- a/src/platform-linux.cc +++ b/src/platform-linux.cc @@ -291,6 +291,8 @@ int OS::ActivationFrameAlignment() { return 8; #elif V8_TARGET_ARCH_MIPS return 8; +#elif V8_TARGET_ARCH_PPC + return 8; #endif // With gcc 4.4 the tree vectorization optimizer can generate code // that requires 16 byte alignment such as movdqa on x86. @@ -300,6 +302,7 @@ int OS::ActivationFrameAlignment() { void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) { #if (defined(V8_TARGET_ARCH_ARM) && defined(__arm__)) || \ + (defined(V8_TARGET_ARCH_PPC) && defined(__PPC__)) || \ (defined(V8_TARGET_ARCH_MIPS) && defined(__mips__)) // Only use on ARM or MIPS hardware. MemoryBarrier(); @@ -409,6 +412,9 @@ void OS::DebugBreak() { # endif #elif defined(__mips__) asm("break"); +#elif defined(__PPC__) + asm("twge 2,2"); +// asm("nop"); // roohack - nothing for now; #else asm("int $3"); #endif @@ -813,7 +819,11 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) { void Thread::YieldCPU() { +#ifdef V8_TARGET_ARCH_PPC + i::OS::Sleep(0); +#else sched_yield(); +#endif } @@ -1039,7 +1049,9 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) { // Extracting the sample from the context is extremely machine dependent. ucontext_t* ucontext = reinterpret_cast(context); +#ifndef V8_HOST_ARCH_PPC mcontext_t& mcontext = ucontext->uc_mcontext; +#endif sample->state = isolate->current_vm_state(); #if V8_HOST_ARCH_IA32 sample->pc = reinterpret_cast
<Address>(mcontext.gregs[REG_EIP]);
@@ -1067,6 +1079,12 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
 sample->pc = reinterpret_cast<Address>(mcontext.pc);
 sample->sp = reinterpret_cast<Address>(mcontext.gregs[29]);
 sample->fp = reinterpret_cast<Address>(mcontext.gregs[30]);
+#elif V8_HOST_ARCH_PPC
+ sample->pc = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->nip);
+ sample->sp =
+ reinterpret_cast<Address>(ucontext->uc_mcontext.regs->gpr[PT_R1]);
+ sample->fp =
+ reinterpret_cast<Address>
(ucontext->uc_mcontext.regs->gpr[PT_R31]); #endif // V8_HOST_ARCH_* sampler->SampleStack(sample); sampler->Tick(sample); diff --git a/src/platform-posix.cc b/src/platform-posix.cc index 3bc8373..a193cb2 100644 --- a/src/platform-posix.cc +++ b/src/platform-posix.cc @@ -53,6 +53,10 @@ #include #endif +#if defined(_AIX) +#include +#endif + #include "v8.h" #include "codegen.h" @@ -112,9 +116,15 @@ void* OS::GetRandomMmapAddr() { // The range 0x20000000 - 0x60000000 is relatively unpopulated across a // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos // 10.6 and 10.7. + // The range 0x30000000 - 0xD0000000 is available on AIX; + // choose the upper range. raw_addr &= 0x3ffff000; +#ifdef _AIX + raw_addr += 0x90000000; +#else raw_addr += 0x20000000; #endif +#endif return reinterpret_cast(raw_addr); } return NULL; @@ -125,7 +135,17 @@ void* OS::GetRandomMmapAddr() { // Math functions double modulo(double x, double y) { +#if defined(_AIX) + // AIX raises an underflow exception for (Number.MIN_VALUE % Number.MAX_VALUE) + double result; + int exception; + feclearexcept(FE_ALL_EXCEPT); + result = fmod(x, y); + exception = fetestexcept(FE_UNDERFLOW); + return (exception ? x : result); +#else return fmod(x, y); +#endif } @@ -147,6 +167,11 @@ UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction()) #undef MATH_FUNCTION +#ifdef _AIX +#undef NAN +#define NAN (__builtin_nanf("")) +#endif + double OS::nan_value() { // NAN from math.h is defined in C99 and not in POSIX. return NAN; diff --git a/src/platform.h b/src/platform.h index de896ac..ffadecc 100644 --- a/src/platform.h +++ b/src/platform.h @@ -107,7 +107,8 @@ namespace internal { // Use AtomicWord for a machine-sized pointer. It is assumed that // reads and writes of naturally aligned values of this type are atomic. -#if defined(__OpenBSD__) && defined(__i386__) +#if !defined(V8_HOST_ARCH_64_BIT) && \ + ((defined(__OpenBSD__) && defined(__i386__)) || defined(_AIX)) typedef Atomic32 AtomicWord; #else typedef intptr_t AtomicWord; diff --git a/src/ppc/assembler-ppc-inl.h b/src/ppc/assembler-ppc-inl.h new file mode 100644 index 0000000..ea80c03 --- /dev/null +++ b/src/ppc/assembler-ppc-inl.h @@ -0,0 +1,457 @@ +// Copyright (c) 1994-2006 Sun Microsystems Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// - Redistribution in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the +// distribution. +// +// - Neither the name of Sun Microsystems or the names of contributors may +// be used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +// OF THE POSSIBILITY OF SUCH DAMAGE. + +// The original source code covered by the above license above has been modified +// significantly by Google Inc. +// Copyright 2012 the V8 project authors. All rights reserved. + +// +// Copyright IBM Corp. 2012, 2013. All rights reserved. +// + +#ifndef V8_PPC_ASSEMBLER_PPC_INL_H_ +#define V8_PPC_ASSEMBLER_PPC_INL_H_ + +#include "ppc/assembler-ppc.h" + +#include "cpu.h" +#include "debug.h" + + +namespace v8 { +namespace internal { + + +int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) { + int index = reg.code() - 1; // d0 is skipped + ASSERT(index < kNumAllocatableRegisters); + ASSERT(!reg.is(kDoubleRegZero)); + ASSERT(!reg.is(kScratchDoubleReg)); + return index; +} + +void RelocInfo::apply(intptr_t delta) { + if (RelocInfo::IsInternalReference(rmode_)) { + // absolute code pointer inside code object moves with the code object. + intptr_t* p = reinterpret_cast(pc_); + *p += delta; // relocate entry + CPU::FlushICache(p, sizeof(uintptr_t)); + } + // We do not use pc relative addressing on PPC, so there is + // nothing else to do. +} + + +Address RelocInfo::target_address() { + ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY); + return Assembler::target_address_at(pc_); +} + + +Address RelocInfo::target_address_address() { + ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY + || rmode_ == EMBEDDED_OBJECT + || rmode_ == EXTERNAL_REFERENCE); + + // Read the address of the word containing the target_address in an + // instruction stream. + // The only architecture-independent user of this function is the serializer. + // The serializer uses it to find out how many raw bytes of instruction to + // output before the next target. + // For an instruction like LIS/ADDIC where the target bits are mixed into the + // instruction bits, the size of the target will be zero, indicating that the + // serializer should not step forward in memory after a target is resolved + // and written. In this case the target_address_address function should + // return the end of the instructions to be patched, allowing the + // deserializer to deserialize the instructions as raw bytes and put them in + // place, ready to be patched with the target. + + return reinterpret_cast
<Address>(
+ pc_ + (Assembler::kInstructionsForPtrConstant *
+ Assembler::kInstrSize));
+}
+
+
+int RelocInfo::target_address_size() {
+ return Assembler::kSpecialTargetSize;
+}
+
+
+void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+ Assembler::set_target_address_at(pc_, target);
+ if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
+}
+
+
+Address Assembler::target_address_from_return_address(Address pc) {
+ return pc - kCallTargetAddressOffset;
+}
+
+
+Object* RelocInfo::target_object() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return Handle<Object>(reinterpret_cast<Object**>(
+ Assembler::target_address_at(pc_)));
+}
+
+
+Object** RelocInfo::target_object_address() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ reconstructed_obj_ptr_ =
+ reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+ return &reconstructed_obj_ptr_;
+}
+
+
+void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ Assembler::set_target_address_at(pc_, reinterpret_cast<Address>
(target)); + if (mode == UPDATE_WRITE_BARRIER && + host() != NULL && + target->IsHeapObject()) { + host()->GetHeap()->incremental_marking()->RecordWrite( + host(), &Memory::Object_at(pc_), HeapObject::cast(target)); + } +} + + +Address* RelocInfo::target_reference_address() { + ASSERT(rmode_ == EXTERNAL_REFERENCE); + reconstructed_adr_ptr_ = Assembler::target_address_at(pc_); + return &reconstructed_adr_ptr_; +} + + +Handle RelocInfo::target_cell_handle() { + ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL); + Address address = Memory::Address_at(pc_); + return Handle( + reinterpret_cast(address)); +} + + +JSGlobalPropertyCell* RelocInfo::target_cell() { + ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL); + return JSGlobalPropertyCell::FromValueAddress(Memory::Address_at(pc_)); +} + + +void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell, + WriteBarrierMode mode) { + ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL); + Address address = cell->address() + JSGlobalPropertyCell::kValueOffset; + Memory::Address_at(pc_) = address; + if (mode == UPDATE_WRITE_BARRIER && host() != NULL) { + // TODO(1550) We are passing NULL as a slot because cell can never be on + // evacuation candidate. + host()->GetHeap()->incremental_marking()->RecordWrite( + host(), NULL, cell); + } +} + + +Address RelocInfo::call_address() { + ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || + (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); + // The pc_ offset of 0 assumes patched return sequence per + // BreakLocationIterator::SetDebugBreakAtReturn(), or debug break + // slot per BreakLocationIterator::SetDebugBreakAtSlot(). + return Assembler::target_address_at(pc_); +} + + +void RelocInfo::set_call_address(Address target) { + ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || + (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); + Assembler::set_target_address_at(pc_, target); + if (host() != NULL) { + Object* target_code = Code::GetCodeFromTargetAddress(target); + host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( + host(), this, HeapObject::cast(target_code)); + } +} + + +Object* RelocInfo::call_object() { + return *call_object_address(); +} + + +void RelocInfo::set_call_object(Object* target) { + *call_object_address() = target; +} + + +Object** RelocInfo::call_object_address() { + ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || + (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); + return reinterpret_cast(pc_ + 2 * Assembler::kInstrSize); +} + + +bool RelocInfo::IsPatchedReturnSequence() { + // + // The patched return sequence is defined by + // BreakLocationIterator::SetDebugBreakAtReturn() + // FIXED_SEQUENCE + + Instr instr0 = Assembler::instr_at(pc_); + Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize); +#if V8_TARGET_ARCH_PPC64 + Instr instr3 = Assembler::instr_at(pc_ + (3 * Assembler::kInstrSize)); + Instr instr4 = Assembler::instr_at(pc_ + (4 * Assembler::kInstrSize)); + Instr binstr = Assembler::instr_at(pc_ + (7 * Assembler::kInstrSize)); +#else + Instr binstr = Assembler::instr_at(pc_ + 4 * Assembler::kInstrSize); +#endif + bool patched_return = ((instr0 & kOpcodeMask) == ADDIS && + (instr1 & kOpcodeMask) == ORI && +#if V8_TARGET_ARCH_PPC64 + (instr3 & kOpcodeMask) == ORIS && + (instr4 & kOpcodeMask) == ORI && +#endif + (binstr == 0x7d821008)); // twge r2, r2 + +// printf("IsPatchedReturnSequence: %d\n", patched_return); + return patched_return; +} + + +bool 
RelocInfo::IsPatchedDebugBreakSlotSequence() { + Instr current_instr = Assembler::instr_at(pc_); + return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP); +} + + +void RelocInfo::Visit(ObjectVisitor* visitor) { + RelocInfo::Mode mode = rmode(); + if (mode == RelocInfo::EMBEDDED_OBJECT) { + visitor->VisitEmbeddedPointer(this); + } else if (RelocInfo::IsCodeTarget(mode)) { + visitor->VisitCodeTarget(this); + } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) { + visitor->VisitGlobalPropertyCell(this); + } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { + visitor->VisitExternalReference(this); +#ifdef ENABLE_DEBUGGER_SUPPORT + // TODO(isolates): Get a cached isolate below. + } else if (((RelocInfo::IsJSReturn(mode) && + IsPatchedReturnSequence()) || + (RelocInfo::IsDebugBreakSlot(mode) && + IsPatchedDebugBreakSlotSequence())) && + Isolate::Current()->debug()->has_break_points()) { + visitor->VisitDebugTarget(this); +#endif + } else if (mode == RelocInfo::RUNTIME_ENTRY) { + visitor->VisitRuntimeEntry(this); + } +} + + +template +void RelocInfo::Visit(Heap* heap) { + RelocInfo::Mode mode = rmode(); + if (mode == RelocInfo::EMBEDDED_OBJECT) { + StaticVisitor::VisitEmbeddedPointer(heap, this); + } else if (RelocInfo::IsCodeTarget(mode)) { + StaticVisitor::VisitCodeTarget(heap, this); + } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) { + StaticVisitor::VisitGlobalPropertyCell(heap, this); + } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { + StaticVisitor::VisitExternalReference(this); +#ifdef ENABLE_DEBUGGER_SUPPORT + } else if (heap->isolate()->debug()->has_break_points() && + ((RelocInfo::IsJSReturn(mode) && + IsPatchedReturnSequence()) || + (RelocInfo::IsDebugBreakSlot(mode) && + IsPatchedDebugBreakSlotSequence()))) { + StaticVisitor::VisitDebugTarget(heap, this); +#endif + } else if (mode == RelocInfo::RUNTIME_ENTRY) { + StaticVisitor::VisitRuntimeEntry(this); + } +} + +Operand::Operand(intptr_t immediate, RelocInfo::Mode rmode) { + rm_ = no_reg; + imm_ = immediate; + rmode_ = rmode; +} + +Operand::Operand(const ExternalReference& f) { + rm_ = no_reg; + imm_ = reinterpret_cast(f.address()); + rmode_ = RelocInfo::EXTERNAL_REFERENCE; +} + +Operand::Operand(Smi* value) { + rm_ = no_reg; + imm_ = reinterpret_cast(value); + rmode_ = RelocInfo::NONE; +} + +Operand::Operand(Register rm) { + rm_ = rm; + rmode_ = RelocInfo::NONE; // PPC -why doesn't ARM do this? +} + +void Assembler::CheckBuffer() { + if (buffer_space() <= kGap) { + GrowBuffer(); + } +} + +void Assembler::CheckTrampolinePoolQuick() { + if (pc_offset() >= next_buffer_check_) { + CheckTrampolinePool(); + } +} + +void Assembler::emit(Instr x) { + CheckBuffer(); + *reinterpret_cast(pc_) = x; + pc_ += kInstrSize; + CheckTrampolinePoolQuick(); +} + +bool Operand::is_reg() const { + return rm_.is_valid(); +} + + +// Fetch the 32bit value from the FIXED_SEQUENCE lis/ori +Address Assembler::target_address_at(Address pc) { + Instr instr1 = instr_at(pc); + Instr instr2 = instr_at(pc + kInstrSize); +#if V8_TARGET_ARCH_PPC64 + Instr instr4 = instr_at(pc + (3*kInstrSize)); + Instr instr5 = instr_at(pc + (4*kInstrSize)); +#endif + // Interpret 2 instructions generated by lis/ori + if (IsLis(instr1) && IsOri(instr2)) { +#if V8_TARGET_ARCH_PPC64 + // Assemble the 64 bit value. + uint64_t hi = (static_cast((instr1 & kImm16Mask) << 16) | + static_cast(instr2 & kImm16Mask)); + uint64_t lo = (static_cast((instr4 & kImm16Mask) << 16) | + static_cast(instr5 & kImm16Mask)); + return reinterpret_cast
<Address>((hi << 32) | lo);
+#else
+ // Assemble the 32 bit value.
+ return reinterpret_cast<Address>
( + ((instr1 & kImm16Mask) << 16) | (instr2 & kImm16Mask)); +#endif + } + + PPCPORT_UNIMPLEMENTED(); + return (Address)0; +} + + +// This sets the branch destination (which gets loaded at the call address). +// This is for calls and branches within generated code. The serializer +// has already deserialized the lis/ori instructions etc. +// There is a FIXED_SEQUENCE assumption here +void Assembler::deserialization_set_special_target_at( + Address instruction_payload, Address target) { + set_target_address_at( + instruction_payload - kInstructionsForPtrConstant * kInstrSize, + target); +} + +// This code assumes the FIXED_SEQUENCE of lis/ori +void Assembler::set_target_address_at(Address pc, Address target) { + Instr instr1 = instr_at(pc); + Instr instr2 = instr_at(pc + kInstrSize); + // Interpret 2 instructions generated by lis/ori + if (IsLis(instr1) && IsOri(instr2)) { +#if V8_TARGET_ARCH_PPC64 + Instr instr4 = instr_at(pc + (3*kInstrSize)); + Instr instr5 = instr_at(pc + (4*kInstrSize)); + // Needs to be fixed up when mov changes to handle 64-bit values. + uint32_t* p = reinterpret_cast(pc); + uintptr_t itarget = reinterpret_cast(target); + + instr5 &= ~kImm16Mask; + instr5 |= itarget & kImm16Mask; + itarget = itarget >> 16; + + instr4 &= ~kImm16Mask; + instr4 |= itarget & kImm16Mask; + itarget = itarget >> 16; + + instr2 &= ~kImm16Mask; + instr2 |= itarget & kImm16Mask; + itarget = itarget >> 16; + + instr1 &= ~kImm16Mask; + instr1 |= itarget & kImm16Mask; + itarget = itarget >> 16; + + *p = instr1; + *(p+1) = instr2; + *(p+3) = instr4; + *(p+4) = instr5; + CPU::FlushICache(p, 20); +#else + uint32_t* p = reinterpret_cast(pc); + uint32_t itarget = reinterpret_cast(target); + int lo_word = itarget & kImm16Mask; + int hi_word = itarget >> 16; + instr1 &= ~kImm16Mask; + instr1 |= hi_word; + instr2 &= ~kImm16Mask; + instr2 |= lo_word; + + *p = instr1; + *(p+1) = instr2; + CPU::FlushICache(p, 8); +#endif + } else { + UNREACHABLE(); + } +} + +} } // namespace v8::internal + +#endif // V8_PPC_ASSEMBLER_PPC_INL_H_ diff --git a/src/ppc/assembler-ppc.cc b/src/ppc/assembler-ppc.cc new file mode 100644 index 0000000..ecfa1b4 --- /dev/null +++ b/src/ppc/assembler-ppc.cc @@ -0,0 +1,1881 @@ +// Copyright (c) 1994-2006 Sun Microsystems Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// - Redistribution in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the +// distribution. +// +// - Neither the name of Sun Microsystems or the names of contributors may +// be used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +// OF THE POSSIBILITY OF SUCH DAMAGE. + +// The original source code covered by the above license above has been +// modified significantly by Google Inc. +// Copyright 2012 the V8 project authors. All rights reserved. + +// +// Copyright IBM Corp. 2012, 2013. All rights reserved. +// + +#include "v8.h" + +#if defined(V8_TARGET_ARCH_PPC) + +#include "ppc/assembler-ppc-inl.h" +#include "serialize.h" + +namespace v8 { +namespace internal { + +#ifdef DEBUG +bool CpuFeatures::initialized_ = false; +#endif +unsigned CpuFeatures::supported_ = 0; +unsigned CpuFeatures::found_by_runtime_probing_ = 0; + +// Get the CPU features enabled by the build. +static unsigned CpuFeaturesImpliedByCompiler() { + unsigned answer = 0; + return answer; +} + +#if !defined(_AIX) +// This function uses types in elf.h +static bool is_processor(const char* p) { + static bool read_tried = false; + static char *auxv_cpu_type = NULL; + + if (!read_tried) { + // Open the AUXV (auxilliary vector) psuedo-file + int fd = open("/proc/self/auxv", O_RDONLY); + + read_tried = true; + if (fd != -1) { +#if V8_TARGET_ARCH_PPC64 + static Elf64_auxv_t buffer[16]; + Elf64_auxv_t *auxv_element; +#else + static Elf32_auxv_t buffer[16]; + Elf32_auxv_t *auxv_element; +#endif + int bytes_read = 0; + while (bytes_read >= 0) { + // Read a chunk of the AUXV + bytes_read = read(fd, buffer, sizeof(buffer)); + // Locate and read the platform field of AUXV if it is in the chunk + for (auxv_element = buffer; + auxv_element+sizeof(auxv_element) <= buffer+bytes_read && + auxv_element->a_type != AT_NULL; + auxv_element++) { + if (auxv_element->a_type == AT_PLATFORM) { + /* Note: Both auxv_cpu_type and buffer are static */ + auxv_cpu_type = reinterpret_cast(auxv_element->a_un.a_val); + goto done_reading; + } + } + } + done_reading: + close(fd); + } + } + + if (auxv_cpu_type == NULL) { + return false; + } + return (strcmp(auxv_cpu_type, p) == 0); +} +#endif + +void CpuFeatures::Probe() { + unsigned standard_features = static_cast( + OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler(); + ASSERT(supported_ == 0 || supported_ == standard_features); +#ifdef DEBUG + initialized_ = true; +#endif + + // Get the features implied by the OS and the compiler settings. This is the + // minimal set of features which is also alowed for generated code in the + // snapshot. + supported_ |= standard_features; + + if (Serializer::enabled()) { + // No probing for features if we might serialize (generate snapshot). 
+ return; + } + + // Detect whether frim instruction is supported (POWER5+) + // For now we will just check for processors we know do not + // support it +#if !defined(_AIX) + if (!is_processor("ppc970") /* G5 */ && !is_processor("ppc7450") /* G4 */) { + // Assume support + supported_ |= (1u << FPU); + } +#else + // Fallback: assume frim is supported -- will implement processor + // detection for other PPC platforms in is_processor() if required + supported_ |= (1u << FPU); +#endif +} + +Register ToRegister(int num) { + ASSERT(num >= 0 && num < kNumRegisters); + const Register kRegisters[] = { + r0, + sp, + r2, r3, r4, r5, r6, r7, r8, r9, r10, + r11, ip, r13, r14, r15, + r16, r17, r18, r19, r20, r21, r22, r23, r24, + r25, r26, r27, r28, r29, r30, fp + }; + return kRegisters[num]; +} + + +// ----------------------------------------------------------------------------- +// Implementation of RelocInfo + +const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE; + + +bool RelocInfo::IsCodedSpecially() { + // The deserializer needs to know whether a pointer is specially + // coded. Being specially coded on PPC means that it is a lis/ori + // instruction sequence, and that is always the case inside code + // objects. + return true; +} + + +void RelocInfo::PatchCode(byte* instructions, int instruction_count) { + // Patch the code at the current address with the supplied instructions. + Instr* pc = reinterpret_cast(pc_); + Instr* instr = reinterpret_cast(instructions); + for (int i = 0; i < instruction_count; i++) { + *(pc + i) = *(instr + i); + } + + // Indicate that code has changed. + CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize); +} + + +// Patch the code at the current PC with a call to the target address. +// Additional guard instructions can be added if required. +void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) { + // Patch the code at the current address with a call to the target. + UNIMPLEMENTED(); +} + + +// ----------------------------------------------------------------------------- +// Implementation of Operand and MemOperand +// See assembler-ppc-inl.h for inlined constructors + +Operand::Operand(Handle handle) { + rm_ = no_reg; + // Verify all Objects referred by code are NOT in new space. + Object* obj = *handle; + ASSERT(!HEAP->InNewSpace(obj)); + if (obj->IsHeapObject()) { + imm_ = reinterpret_cast(handle.location()); + rmode_ = RelocInfo::EMBEDDED_OBJECT; + } else { + // no relocation needed + imm_ = reinterpret_cast(obj); + rmode_ = RelocInfo::NONE; + } +} + +MemOperand::MemOperand(Register rn, int32_t offset) { + ra_ = rn; + rb_ = no_reg; + offset_ = offset; +} + +MemOperand::MemOperand(Register ra, Register rb) { + ra_ = ra; + rb_ = rb; + offset_ = 0; +} + +// ----------------------------------------------------------------------------- +// Specific instructions, constants, and masks. + +// Spare buffer. +static const int kMinimalBufferSize = 4*KB; + + +Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size) + : AssemblerBase(arg_isolate), + recorded_ast_id_(TypeFeedbackId::None()), + positions_recorder_(this), + emit_debug_code_(FLAG_debug_code), + predictable_code_size_(false) { + if (buffer == NULL) { + // Do our own buffer management. 
+ if (buffer_size <= kMinimalBufferSize) { + buffer_size = kMinimalBufferSize; + + if (isolate()->assembler_spare_buffer() != NULL) { + buffer = isolate()->assembler_spare_buffer(); + isolate()->set_assembler_spare_buffer(NULL); + } + } + if (buffer == NULL) { + buffer_ = NewArray(buffer_size); + } else { + buffer_ = static_cast(buffer); + } + buffer_size_ = buffer_size; + own_buffer_ = true; + + } else { + // Use externally provided buffer instead. + ASSERT(buffer_size > 0); + buffer_ = static_cast(buffer); + buffer_size_ = buffer_size; + own_buffer_ = false; + } + + // Set up buffer pointers. + ASSERT(buffer_ != NULL); + pc_ = buffer_; + reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); + + no_trampoline_pool_before_ = 0; + trampoline_pool_blocked_nesting_ = 0; + // We leave space (kMaxBlockTrampolineSectionSize) + // for BlockTrampolinePoolScope buffer. + next_buffer_check_ = kMaxCondBranchReach - kMaxBlockTrampolineSectionSize; + internal_trampoline_exception_ = false; + last_bound_pos_ = 0; + + trampoline_emitted_ = false; + unbound_labels_count_ = 0; + + ClearRecordedAstId(); +} + + +Assembler::~Assembler() { + if (own_buffer_) { + if (isolate()->assembler_spare_buffer() == NULL && + buffer_size_ == kMinimalBufferSize) { + isolate()->set_assembler_spare_buffer(buffer_); + } else { + DeleteArray(buffer_); + } + } +} + + +void Assembler::GetCode(CodeDesc* desc) { + // Set up code descriptor. + desc->buffer = buffer_; + desc->buffer_size = buffer_size_; + desc->instr_size = pc_offset(); + desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos(); +} + + +void Assembler::Align(int m) { + ASSERT(m >= 4 && IsPowerOf2(m)); + while ((pc_offset() & (m - 1)) != 0) { + nop(); + } +} + + +void Assembler::CodeTargetAlign() { + Align(8); +} + + +Condition Assembler::GetCondition(Instr instr) { + switch (instr & kCondMask) { + case BT: + return eq; + case BF: + return ne; + default: + UNIMPLEMENTED(); + } + return al; +} + +// PowerPC + +bool Assembler::IsLis(Instr instr) { + return (instr & kOpcodeMask) == ADDIS; +} + +bool Assembler::IsAddic(Instr instr) { + return (instr & kOpcodeMask) == ADDIC; +} + +bool Assembler::IsOri(Instr instr) { + return (instr & kOpcodeMask) == ORI; +} + + +bool Assembler::IsBranch(Instr instr) { + return ((instr & kOpcodeMask) == BCX); +} + +// end PowerPC + +Register Assembler::GetRA(Instr instr) { + Register reg; + reg.code_ = Instruction::RAValue(instr); + return reg; +} + +Register Assembler::GetRB(Instr instr) { + Register reg; + reg.code_ = Instruction::RBValue(instr); + return reg; +} + +#if V8_TARGET_ARCH_PPC64 +// This code assumes a FIXED_SEQUENCE for 64bit loads (lis/ori) +bool Assembler::Is64BitLoadIntoR12(Instr instr1, Instr instr2, + Instr instr3, Instr instr4, Instr instr5) { + // Check the instructions are indeed a five part load (into r12) + // 3d800000 lis r12, 0 + // 618c0000 ori r12, r12, 0 + // 798c07c6 rldicr r12, r12, 32, 31 + // 658c00c3 oris r12, r12, 195 + // 618ccd40 ori r12, r12, 52544 + return(((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c) && + (instr3 == 0x798c07c6) && + ((instr4 >> 16) == 0x658c) && ((instr5 >> 16) == 0x618c)); +} +#else +// This code assumes a FIXED_SEQUENCE for 32bit loads (lis/ori) +bool Assembler::Is32BitLoadIntoR12(Instr instr1, Instr instr2) { + // Check the instruction is indeed a two part load (into r12) + // 3d802553 lis r12, 9555 + // 618c5000 ori r12, r12, 20480 + return(((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c)); +} +#endif + +bool Assembler::IsCmpRegister(Instr 
instr) { + return (((instr & kOpcodeMask) == EXT2) && + ((instr & kExt2OpcodeMask) == CMP)); +} + +bool Assembler::IsRlwinm(Instr instr) { + return ((instr & kOpcodeMask) == RLWINMX); +} + +#if V8_TARGET_ARCH_PPC64 +bool Assembler::IsRldicl(Instr instr) { + return (((instr & kOpcodeMask) == EXT5) && + ((instr & kExt5OpcodeMask) == RLDICL)); +} +#endif + +bool Assembler::IsCmpImmediate(Instr instr) { + return ((instr & kOpcodeMask) == CMPI); +} + +Register Assembler::GetCmpImmediateRegister(Instr instr) { + ASSERT(IsCmpImmediate(instr)); + return GetRA(instr); +} + +int Assembler::GetCmpImmediateRawImmediate(Instr instr) { + ASSERT(IsCmpImmediate(instr)); + return instr & kOff16Mask; +} + +// Labels refer to positions in the (to be) generated code. +// There are bound, linked, and unused labels. +// +// Bound labels refer to known positions in the already +// generated code. pos() is the position the label refers to. +// +// Linked labels refer to unknown positions in the code +// to be generated; pos() is the position of the last +// instruction using the label. + + +// The link chain is terminated by a negative code position (must be aligned) +const int kEndOfChain = -4; + + +int Assembler::target_at(int pos) { + Instr instr = instr_at(pos); + // check which type of branch this is 16 or 26 bit offset + int opcode = instr & kOpcodeMask; + if (BX == opcode) { + int imm26 = ((instr & kImm26Mask) << 6) >> 6; + imm26 &= ~(kAAMask|kLKMask); // discard AA|LK bits if present + if (imm26 == 0) + return kEndOfChain; + return pos + imm26; + } else if (BCX == opcode) { + int imm16 = SIGN_EXT_IMM16((instr & kImm16Mask)); + imm16 &= ~(kAAMask|kLKMask); // discard AA|LK bits if present + if (imm16 == 0) + return kEndOfChain; + return pos + imm16; + } else if ((instr & ~kImm16Mask) == 0) { + // Emitted label constant, not part of a branch (regexp PushBacktrack). + if (instr == 0) { + return kEndOfChain; + } else { + int32_t imm16 = SIGN_EXT_IMM16(instr); + return (imm16 + pos); + } + } + + PPCPORT_UNIMPLEMENTED(); + ASSERT(false); + return -1; +} + +void Assembler::target_at_put(int pos, int target_pos) { + Instr instr = instr_at(pos); + int opcode = instr & kOpcodeMask; + + // check which type of branch this is 16 or 26 bit offset + if (BX == opcode) { + int imm26 = target_pos - pos; + ASSERT((imm26 & (kAAMask|kLKMask)) == 0); + instr &= ((~kImm26Mask)|kAAMask|kLKMask); + ASSERT(is_int26(imm26)); + instr_at_put(pos, instr | (imm26 & kImm26Mask)); + return; + } else if (BCX == opcode) { + int imm16 = target_pos - pos; + ASSERT((imm16 & (kAAMask|kLKMask)) == 0); + instr &= ((~kImm16Mask)|kAAMask|kLKMask); + ASSERT(is_int16(imm16)); + instr_at_put(pos, instr | (imm16 & kImm16Mask)); + return; + } else if ((instr & ~kImm16Mask) == 0) { + ASSERT(target_pos == kEndOfChain || target_pos >= 0); + // Emitted label constant, not part of a branch. + // Make label relative to Code* of generated Code object. + instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag)); + return; + } + + ASSERT(false); +} + +int Assembler::max_reach_from(int pos) { + Instr instr = instr_at(pos); + int opcode = instr & kOpcodeMask; + + // check which type of branch this is 16 or 26 bit offset + if (BX == opcode) { + return 26; + } else if (BCX == opcode) { + return 16; + } else if ((instr & ~kImm16Mask) == 0) { + // Emitted label constant, not part of a branch (regexp PushBacktrack). 
+ return 16; + } + + ASSERT(false); + return 0; +} + +void Assembler::bind_to(Label* L, int pos) { + ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position + int32_t trampoline_pos = kInvalidSlotPos; + if (L->is_linked() && !trampoline_emitted_) { + unbound_labels_count_--; + next_buffer_check_ += kTrampolineSlotsSize; + } + + while (L->is_linked()) { + int fixup_pos = L->pos(); + int32_t offset = pos - fixup_pos; + int maxReach = max_reach_from(fixup_pos); + next(L); // call next before overwriting link with target at fixup_pos + if (is_intn(offset, maxReach) == false) { + if (trampoline_pos == kInvalidSlotPos) { + trampoline_pos = get_trampoline_entry(); + CHECK(trampoline_pos != kInvalidSlotPos); + target_at_put(trampoline_pos, pos); + } + target_at_put(fixup_pos, trampoline_pos); + } else { + target_at_put(fixup_pos, pos); + } + } + L->bind_to(pos); + + // Keep track of the last bound label so we don't eliminate any instructions + // before a bound label. + if (pos > last_bound_pos_) + last_bound_pos_ = pos; +} + +void Assembler::bind(Label* L) { + ASSERT(!L->is_bound()); // label can only be bound once + bind_to(L, pc_offset()); +} + + +void Assembler::next(Label* L) { + ASSERT(L->is_linked()); + int link = target_at(L->pos()); + if (link == kEndOfChain) { + L->Unuse(); + } else { + ASSERT(link >= 0); + L->link_to(link); + } +} + +bool Assembler::is_near(Label* L, Condition cond) { + ASSERT(L->is_bound()); + if (L->is_bound() == false) + return false; + + int maxReach = ((cond == al) ? 26 : 16); + int offset = L->pos() - pc_offset(); + + return is_intn(offset, maxReach); +} + +void Assembler::a_form(Instr instr, + DwVfpRegister frt, + DwVfpRegister fra, + DwVfpRegister frb, + RCBit r) { + emit(instr | frt.code()*B21 | fra.code()*B16 | frb.code()*B11 | r); +} + +void Assembler::d_form(Instr instr, + Register rt, + Register ra, + const intptr_t val, + bool signed_disp) { + if (signed_disp) { + if (!is_int16(val)) { + PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR "\n", val, val); + } + ASSERT(is_int16(val)); + } else { + if (!is_uint16(val)) { + PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR + ", is_unsigned_imm16(val)=%d, kImm16Mask=0x%x\n", + val, val, is_uint16(val), kImm16Mask); + } + ASSERT(is_uint16(val)); + } + emit(instr | rt.code()*B21 | ra.code()*B16 | (kImm16Mask & val)); +} + +void Assembler::x_form(Instr instr, + Register ra, + Register rs, + Register rb, + RCBit r) { + emit(instr | rs.code()*B21 | ra.code()*B16 | rb.code()*B11 | r); +} + +void Assembler::xo_form(Instr instr, + Register rt, + Register ra, + Register rb, + OEBit o, + RCBit r) { + emit(instr | rt.code()*B21 | ra.code()*B16 | rb.code()*B11 | o | r); +} + +void Assembler::md_form(Instr instr, + Register ra, + Register rs, + int shift, + int maskbit, + RCBit r) { + int sh0_4 = shift & 0x1f; + int sh5 = (shift >> 5) & 0x1; + int m0_4 = maskbit & 0x1f; + int m5 = (maskbit >> 5) & 0x1; + + emit(instr | rs.code()*B21 | ra.code()*B16 | + sh0_4*B11 | m0_4*B6 | m5*B5 | sh5*B1 | r); +} + +// Returns the next free trampoline entry. 
+int32_t Assembler::get_trampoline_entry() { + int32_t trampoline_entry = kInvalidSlotPos; + + if (!internal_trampoline_exception_) { + trampoline_entry = trampoline_.take_slot(); + + if (kInvalidSlotPos == trampoline_entry) { + internal_trampoline_exception_ = true; + } + } + return trampoline_entry; +} + +int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) { + int target_pos; + if (L->is_bound()) { + target_pos = L->pos(); + } else { + if (L->is_linked()) { + target_pos = L->pos(); // L's link + } else { + // was: target_pos = kEndOfChain; + // However, using branch to self to mark the first reference + // should avoid most instances of branch offset overflow. See + // target_at() for where this is converted back to kEndOfChain. + target_pos = pc_offset(); + if (!trampoline_emitted_) { + unbound_labels_count_++; + next_buffer_check_ -= kTrampolineSlotsSize; + } + } + L->link_to(pc_offset()); + } + + return target_pos - pc_offset(); +} + + +void Assembler::label_at_put(Label* L, int at_offset) { + int target_pos; + if (L->is_bound()) { + target_pos = L->pos(); + instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag)); + } else { + if (L->is_linked()) { + target_pos = L->pos(); // L's link + } else { + // was: target_pos = kEndOfChain; + // However, using branch to self to mark the first reference + // should avoid most instances of branch offset overflow. See + // target_at() for where this is converted back to kEndOfChain. + target_pos = at_offset; + if (!trampoline_emitted_) { + unbound_labels_count_++; + next_buffer_check_ -= kTrampolineSlotsSize; + } + } + L->link_to(at_offset); + + Instr constant = target_pos - at_offset; + ASSERT(is_int16(constant)); + instr_at_put(at_offset, constant); + } +} + + +// Branch instructions. 
+ +// PowerPC +void Assembler::bclr(BOfield bo, LKBit lk) { + positions_recorder()->WriteRecordedPositions(); + emit(EXT1 | bo | BCLRX | lk); +} + +void Assembler::bcctr(BOfield bo, LKBit lk) { + positions_recorder()->WriteRecordedPositions(); + emit(EXT1 | bo | BCCTRX | lk); +} + +// Pseudo op - branch to link register +void Assembler::blr() { + bclr(BA, LeaveLK); +} + +// Pseudo op - branch to count register -- used for "jump" +void Assembler::bcr() { + bcctr(BA, LeaveLK); +} + +void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) { + positions_recorder()->WriteRecordedPositions(); + ASSERT(is_int16(branch_offset)); + emit(BCX | bo | condition_bit*B16 | (kImm16Mask & branch_offset) | lk); +} + +void Assembler::b(int branch_offset, LKBit lk) { + positions_recorder()->WriteRecordedPositions(); + ASSERT((branch_offset & 3) == 0); + int imm26 = branch_offset; + ASSERT(is_int26(imm26)); + // todo add AA and LK bits + emit(BX | (imm26 & kImm26Mask) | lk); +} + +void Assembler::xori(Register dst, Register src, const Operand& imm) { + d_form(XORI, src, dst, imm.imm_, false); +} + +void Assembler::xoris(Register ra, Register rs, const Operand& imm) { + d_form(XORIS, rs, ra, imm.imm_, false); +} + +void Assembler::xor_(Register dst, Register src1, Register src2, RCBit rc) { + x_form(EXT2 | XORX, dst, src1, src2, rc); +} + +void Assembler::cntlzw_(Register ra, Register rs, RCBit rc) { + x_form(EXT2 | CNTLZWX, ra, rs, r0, rc); +} + +void Assembler::and_(Register ra, Register rs, Register rb, RCBit rc) { + x_form(EXT2 | ANDX, ra, rs, rb, rc); +} + + +void Assembler::rlwinm(Register ra, Register rs, + int sh, int mb, int me, RCBit rc) { + sh &= 0x1f; + mb &= 0x1f; + me &= 0x1f; + emit(RLWINMX | rs.code()*B21 | ra.code()*B16 | sh*B11 | mb*B6 | me << 1 | rc); +} + +void Assembler::rlwimi(Register ra, Register rs, + int sh, int mb, int me, RCBit rc) { + sh &= 0x1f; + mb &= 0x1f; + me &= 0x1f; + emit(RLWIMIX | rs.code()*B21 | ra.code()*B16 | sh*B11 | mb*B6 | me << 1 | rc); +} + +void Assembler::slwi(Register dst, Register src, const Operand& val, + RCBit rc) { + ASSERT((32 > val.imm_)&&(val.imm_ >= 0)); + rlwinm(dst, src, val.imm_, 0, 31-val.imm_, rc); +} +void Assembler::srwi(Register dst, Register src, const Operand& val, + RCBit rc) { + ASSERT((32 > val.imm_)&&(val.imm_ >= 0)); + rlwinm(dst, src, 32-val.imm_, val.imm_, 31, rc); +} +void Assembler::clrrwi(Register dst, Register src, const Operand& val, + RCBit rc) { + ASSERT((32 > val.imm_)&&(val.imm_ >= 0)); + rlwinm(dst, src, 0, 0, 31-val.imm_, rc); +} +void Assembler::clrlwi(Register dst, Register src, const Operand& val, + RCBit rc) { + ASSERT((32 > val.imm_)&&(val.imm_ >= 0)); + rlwinm(dst, src, 0, val.imm_, 31, rc); +} + + +void Assembler::srawi(Register ra, Register rs, int sh, RCBit r) { + emit(EXT2 | SRAWIX | rs.code()*B21 | ra.code()*B16 | sh*B11 | r); +} + +void Assembler::srw(Register dst, Register src1, Register src2, RCBit r) { + x_form(EXT2 | SRWX, dst, src1, src2, r); +} + +void Assembler::slw(Register dst, Register src1, Register src2, RCBit r) { + x_form(EXT2 | SLWX, dst, src1, src2, r); +} + +void Assembler::sraw(Register ra, Register rs, Register rb, RCBit r) { + x_form(EXT2 | SRAW, ra, rs, rb, r); +} + +void Assembler::subi(Register dst, Register src, const Operand& imm) { + addi(dst, src, Operand(-(imm.imm_))); +} + +void Assembler::addc(Register dst, Register src1, Register src2, + OEBit o, RCBit r) { + xo_form(EXT2 | ADDCX, dst, src1, src2, o, r); +} + +void Assembler::addze(Register dst, Register src1, 
OEBit o, RCBit r) { + // a special xo_form + emit(EXT2 | ADDZEX | dst.code()*B21 | src1.code()*B16 | o | r); +} + +void Assembler::sub(Register dst, Register src1, Register src2, + OEBit o, RCBit r) { + xo_form(EXT2 | SUBFX, dst, src2, src1, o, r); +} + +void Assembler::subfc(Register dst, Register src1, Register src2, + OEBit o, RCBit r) { + xo_form(EXT2 | SUBFCX, dst, src2, src1, o, r); +} + +void Assembler::subfic(Register dst, Register src, const Operand& imm) { + d_form(SUBFIC, dst, src, imm.imm_, true); +} + +void Assembler::add(Register dst, Register src1, Register src2, + OEBit o, RCBit r) { + xo_form(EXT2 | ADDX, dst, src1, src2, o, r); +} + +// Multiply low word +void Assembler::mullw(Register dst, Register src1, Register src2, + OEBit o, RCBit r) { + xo_form(EXT2 | MULLW, dst, src1, src2, o, r); +} + +// Multiply hi word +void Assembler::mulhw(Register dst, Register src1, Register src2, + OEBit o, RCBit r) { + xo_form(EXT2 | MULHWX, dst, src1, src2, o, r); +} + +// Divide word +void Assembler::divw(Register dst, Register src1, Register src2, + OEBit o, RCBit r) { + xo_form(EXT2 | DIVW, dst, src1, src2, o, r); +} + +void Assembler::addi(Register dst, Register src, const Operand& imm) { + ASSERT(!src.is(r0)); // use li instead to show intent + d_form(ADDI, dst, src, imm.imm_, true); +} + +void Assembler::addis(Register dst, Register src, const Operand& imm) { + ASSERT(!src.is(r0)); // use lis instead to show intent + d_form(ADDIS, dst, src, imm.imm_, true); +} + +void Assembler::addic(Register dst, Register src, const Operand& imm) { + d_form(ADDIC, dst, src, imm.imm_, true); +} + +void Assembler::andi(Register ra, Register rs, const Operand& imm) { + d_form(ANDIx, rs, ra, imm.imm_, false); +} + +void Assembler::andis(Register ra, Register rs, const Operand& imm) { + d_form(ANDISx, rs, ra, imm.imm_, false); +} + +void Assembler::nor(Register dst, Register src1, Register src2, RCBit r) { + x_form(EXT2 | NORX, dst, src1, src2, r); +} + +void Assembler::notx(Register dst, Register src, RCBit r) { + x_form(EXT2 | NORX, dst, src, src, r); +} + +void Assembler::ori(Register ra, Register rs, const Operand& imm) { + d_form(ORI, rs, ra, imm.imm_, false); +} + +void Assembler::oris(Register dst, Register src, const Operand& imm) { + d_form(ORIS, src, dst, imm.imm_, false); +} + +void Assembler::orx(Register dst, Register src1, Register src2, RCBit rc) { + x_form(EXT2 | ORX, dst, src1, src2, rc); +} + +void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) { + intptr_t imm16 = src2.imm_; +#if V8_TARGET_ARCH_PPC64 + int L = 1; +#else + int L = 0; +#endif + ASSERT(is_int16(imm16)); + ASSERT(cr.code() >= 0 && cr.code() <= 7); + imm16 &= kImm16Mask; + emit(CMPI | cr.code()*B23 | L*B21 | src1.code()*B16 | imm16); +} + +void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) { + uintptr_t uimm16 = src2.imm_; +#if V8_TARGET_ARCH_PPC64 + int L = 1; +#else + int L = 0; +#endif + ASSERT(is_uint16(uimm16)); + ASSERT(cr.code() >= 0 && cr.code() <= 7); + uimm16 &= kImm16Mask; + emit(CMPLI | cr.code()*B23 | L*B21 | src1.code()*B16 | uimm16); +} + +void Assembler::cmp(Register src1, Register src2, CRegister cr) { +#if V8_TARGET_ARCH_PPC64 + int L = 1; +#else + int L = 0; +#endif + ASSERT(cr.code() >= 0 && cr.code() <= 7); + emit(EXT2 | CMP | cr.code()*B23 | L*B21 | src1.code()*B16 | + src2.code()*B11); +} + +void Assembler::cmpl(Register src1, Register src2, CRegister cr) { +#if V8_TARGET_ARCH_PPC64 + int L = 1; +#else + int L = 0; +#endif + ASSERT(cr.code() >= 0 && 
cr.code() <= 7); + emit(EXT2 | CMPL | cr.code()*B23 | L*B21 | src1.code()*B16 | + src2.code()*B11); +} + +// Pseudo op - load immediate +void Assembler::li(Register dst, const Operand &imm) { + d_form(ADDI, dst, r0, imm.imm_, true); +} + +void Assembler::lis(Register dst, const Operand& imm) { + d_form(ADDIS, dst, r0, imm.imm_, true); +} + +// Pseudo op - move register +void Assembler::mr(Register dst, Register src) { + // actually or(dst, src, src) + orx(dst, src, src); +} + +void Assembler::lbz(Register dst, const MemOperand &src) { + ASSERT(!src.ra_.is(r0)); + d_form(LBZ, dst, src.ra(), src.offset(), true); +} + +void Assembler::lbzx(Register rt, const MemOperand &src) { + Register ra = src.ra(); + Register rb = src.rb(); + ASSERT(!ra.is(r0)); + emit(EXT2 | LBZX | rt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); +} + +void Assembler::lbzux(Register rt, const MemOperand & src) { + Register ra = src.ra(); + Register rb = src.rb(); + ASSERT(!ra.is(r0)); + emit(EXT2 | LBZUX | rt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); +} + +void Assembler::lhz(Register dst, const MemOperand &src) { + ASSERT(!src.ra_.is(r0)); + d_form(LHZ, dst, src.ra(), src.offset(), true); +} + +void Assembler::lhzx(Register rt, const MemOperand &src) { + Register ra = src.ra(); + Register rb = src.rb(); + ASSERT(!ra.is(r0)); + emit(EXT2 | LHZX | rt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); +} + +void Assembler::lhzux(Register rt, const MemOperand & src) { + Register ra = src.ra(); + Register rb = src.rb(); + ASSERT(!ra.is(r0)); + emit(EXT2 | LHZUX | rt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); +} + +void Assembler::lwz(Register dst, const MemOperand &src) { + ASSERT(!src.ra_.is(r0)); + d_form(LWZ, dst, src.ra(), src.offset(), true); +} + +void Assembler::lwzu(Register dst, const MemOperand &src) { + ASSERT(!src.ra_.is(r0)); + d_form(LWZU, dst, src.ra(), src.offset(), true); +} + +void Assembler::lwzx(Register rt, const MemOperand &src) { + Register ra = src.ra(); + Register rb = src.rb(); + ASSERT(!ra.is(r0)); + emit(EXT2 | LWZX | rt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); +} + +void Assembler::lwzux(Register rt, const MemOperand & src) { + Register ra = src.ra(); + Register rb = src.rb(); + ASSERT(!ra.is(r0)); + emit(EXT2 | LWZUX | rt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); +} + +void Assembler::lwa(Register dst, const MemOperand &src) { +#if V8_TARGET_ARCH_PPC64 + int offset = src.offset(); + ASSERT(!src.ra_.is(r0)); + ASSERT(!(offset & 3) && is_int16(offset)); + offset = kImm16Mask & offset; + emit(LD | dst.code()*B21 | src.ra().code()*B16 | offset | 2); +#else + lwz(dst, src); +#endif +} + +void Assembler::stb(Register dst, const MemOperand &src) { + ASSERT(!src.ra_.is(r0)); + d_form(STB, dst, src.ra(), src.offset(), true); +} + +void Assembler::stbx(Register rs, const MemOperand &src) { + Register ra = src.ra(); + Register rb = src.rb(); + ASSERT(!ra.is(r0)); + emit(EXT2 | STBX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); +} + +void Assembler::stbux(Register rs, const MemOperand &src) { + Register ra = src.ra(); + Register rb = src.rb(); + ASSERT(!ra.is(r0)); + emit(EXT2 | STBUX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); +} + +void Assembler::sth(Register dst, const MemOperand &src) { + ASSERT(!src.ra_.is(r0)); + d_form(STH, dst, src.ra(), src.offset(), true); +} + +void Assembler::sthx(Register rs, const MemOperand &src) { + Register ra = src.ra(); + Register rb = src.rb(); + ASSERT(!ra.is(r0)); + 
emit(EXT2 | STHX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); +} + +void Assembler::sthux(Register rs, const MemOperand &src) { + Register ra = src.ra(); + Register rb = src.rb(); + ASSERT(!ra.is(r0)); + emit(EXT2 | STHUX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); +} + +void Assembler::stw(Register dst, const MemOperand &src) { + ASSERT(!src.ra_.is(r0)); + d_form(STW, dst, src.ra(), src.offset(), true); +} + +void Assembler::stwu(Register dst, const MemOperand &src) { + ASSERT(!src.ra_.is(r0)); + d_form(STWU, dst, src.ra(), src.offset(), true); +} + +void Assembler::stwx(Register rs, const MemOperand &src) { + Register ra = src.ra(); + Register rb = src.rb(); + ASSERT(!ra.is(r0)); + emit(EXT2 | STWX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); +} + +void Assembler::stwux(Register rs, const MemOperand &src) { + Register ra = src.ra(); + Register rb = src.rb(); + ASSERT(!ra.is(r0)); + emit(EXT2 | STWUX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); +} + +void Assembler::extsb(Register rs, Register ra, RCBit rc) { + emit(EXT2 | EXTSB | ra.code()*B21 | rs.code()*B16 | rc); +} + +void Assembler::extsh(Register rs, Register ra, RCBit rc) { + emit(EXT2 | EXTSH | ra.code()*B21 | rs.code()*B16 | rc); +} + +void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) { + emit(EXT2 | NEGX | rt.code()*B21 | ra.code()*B16 | o | r); +} + +void Assembler::andc(Register dst, Register src1, Register src2, RCBit rc) { + x_form(EXT2 | ANDCX, dst, src1, src2, rc); +} + +#if V8_TARGET_ARCH_PPC64 +// 64bit specific instructions +void Assembler::ld(Register rd, const MemOperand &src) { + int offset = src.offset(); + ASSERT(!src.ra_.is(r0)); + ASSERT(!(offset & 3) && is_int16(offset)); + offset = kImm16Mask & offset; + emit(LD | rd.code()*B21 | src.ra().code()*B16 | offset); +} + +void Assembler::ldx(Register rd, const MemOperand &src) { + Register ra = src.ra(); + Register rb = src.rb(); + ASSERT(!ra.is(r0)); + emit(EXT2 | LDX | rd.code()*B21 | ra.code()*B16 | rb.code()*B11); +} + +void Assembler::ldu(Register rd, const MemOperand &src) { + int offset = src.offset(); + ASSERT(!src.ra_.is(r0)); + ASSERT(!(offset & 3) && is_int16(offset)); + offset = kImm16Mask & offset; + emit(LD | rd.code()*B21 | src.ra().code()*B16 | offset | 1); +} + +void Assembler::ldux(Register rd, const MemOperand &src) { + Register ra = src.ra(); + Register rb = src.rb(); + ASSERT(!ra.is(r0)); + emit(EXT2 | LDUX | rd.code()*B21 | ra.code()*B16 | rb.code()*B11); +} + +void Assembler::std(Register rs, const MemOperand &src) { + int offset = src.offset(); + ASSERT(!src.ra_.is(r0)); + ASSERT(!(offset & 3) && is_int16(offset)); + offset = kImm16Mask & offset; + emit(STD | rs.code()*B21 | src.ra().code()*B16 | offset); +} + +void Assembler::stdx(Register rs, const MemOperand &src) { + Register ra = src.ra(); + Register rb = src.rb(); + ASSERT(!ra.is(r0)); + emit(EXT2 | STDX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11); +} + +void Assembler::stdu(Register rs, const MemOperand &src) { + int offset = src.offset(); + ASSERT(!src.ra_.is(r0)); + ASSERT(!(offset & 3) && is_int16(offset)); + offset = kImm16Mask & offset; + emit(STD | rs.code()*B21 | src.ra().code()*B16 | offset | 1); +} + +void Assembler::stdux(Register rs, const MemOperand &src) { + Register ra = src.ra(); + Register rb = src.rb(); + ASSERT(!ra.is(r0)); + emit(EXT2 | STDUX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11); +} + +void Assembler::rldic(Register ra, Register rs, int sh, int mb, RCBit r) { + md_form(EXT5 | 
RLDIC, ra, rs, sh, mb, r); +} + +void Assembler::rldicl(Register ra, Register rs, int sh, int mb, RCBit r) { + md_form(EXT5 | RLDICL, ra, rs, sh, mb, r); +} + +void Assembler::rldicr(Register ra, Register rs, int sh, int me, RCBit r) { + md_form(EXT5 | RLDICR, ra, rs, sh, me, r); +} + +void Assembler::sldi(Register dst, Register src, const Operand& val, + RCBit rc) { + ASSERT((64 > val.imm_)&&(val.imm_ >= 0)); + rldicr(dst, src, val.imm_, 63-val.imm_, rc); +} +void Assembler::srdi(Register dst, Register src, const Operand& val, + RCBit rc) { + ASSERT((64 > val.imm_)&&(val.imm_ >= 0)); + rldicl(dst, src, 64-val.imm_, val.imm_, rc); +} +void Assembler::clrrdi(Register dst, Register src, const Operand& val, + RCBit rc) { + ASSERT((64 > val.imm_)&&(val.imm_ >= 0)); + rldicr(dst, src, 0, 63-val.imm_, rc); +} +void Assembler::clrldi(Register dst, Register src, const Operand& val, + RCBit rc) { + ASSERT((64 > val.imm_)&&(val.imm_ >= 0)); + rldicl(dst, src, 0, val.imm_, rc); +} + + +void Assembler::rldimi(Register ra, Register rs, int sh, int mb, RCBit r) { + md_form(EXT5 | RLDIMI, ra, rs, sh, mb, r); +} + + +void Assembler::sradi(Register ra, Register rs, int sh, RCBit r) { + int sh0_4 = sh & 0x1f; + int sh5 = (sh >> 5) & 0x1; + + emit(EXT2 | SRADIX | rs.code()*B21 | ra.code()*B16 | sh0_4*B11 | sh5*B1 | r); +} + +void Assembler::srd(Register dst, Register src1, Register src2, RCBit r) { + x_form(EXT2 | SRDX, dst, src1, src2, r); +} + +void Assembler::sld(Register dst, Register src1, Register src2, RCBit r) { + x_form(EXT2 | SLDX, dst, src1, src2, r); +} + +void Assembler::srad(Register ra, Register rs, Register rb, RCBit r) { + x_form(EXT2 | SRAD, ra, rs, rb, r); +} + +void Assembler::cntlzd_(Register ra, Register rs, RCBit rc) { + x_form(EXT2 | CNTLZDX, ra, rs, r0, rc); +} + +void Assembler::extsw(Register rs, Register ra, RCBit rc) { + emit(EXT2 | EXTSW | ra.code()*B21 | rs.code()*B16 | rc); +} + +void Assembler::mulld(Register dst, Register src1, Register src2, + OEBit o, RCBit r) { + xo_form(EXT2 | MULLD, dst, src1, src2, o, r); +} + +void Assembler::divd(Register dst, Register src1, Register src2, + OEBit o, RCBit r) { + xo_form(EXT2 | DIVD, dst, src1, src2, o, r); +} +#endif + + +void Assembler::fake_asm(enum FAKE_OPCODE_T fopcode) { + ASSERT(fopcode < fLastFaker); + emit(FAKE_OPCODE | FAKER_SUBOPCODE | fopcode); +} + +void Assembler::marker_asm(int mcode) { + if (::v8::internal::FLAG_trace_sim_stubs) { + ASSERT(mcode < F_NEXT_AVAILABLE_STUB_MARKER); + emit(FAKE_OPCODE | MARKER_SUBOPCODE | mcode); + } +} + +// Function descriptor for AIX. +// Code address skips the function descriptor "header". +// TOC and static chain are ignored and set to 0. 
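+// Layout sketch (inferred from the comment above and the emits below, not
+// additional behaviour): the descriptor is three pointer-sized words --
+//   [0] entry point   [1] TOC pointer   [2] environment / static chain --
+// so word [0] is written as pc_ + 3 * kPointerSize (the first real
+// instruction) and the remaining words are left zero.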
+void Assembler::function_descriptor() { + RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE); +#if V8_TARGET_ARCH_PPC64 + uint64_t value = reinterpret_cast(pc_) + 3 * kPointerSize; +#if __BYTE_ORDER == __LITTLE_ENDIAN + emit(static_cast(value & 0xFFFFFFFF)); + emit(static_cast(value >> 32)); +#else + emit(static_cast(value >> 32)); + emit(static_cast(value & 0xFFFFFFFF)); +#endif + emit(static_cast(0)); + emit(static_cast(0)); + emit(static_cast(0)); + emit(static_cast(0)); +#else + emit(reinterpret_cast(pc_) + 3 * kPointerSize); + emit(static_cast(0)); + emit(static_cast(0)); +#endif +} +// end PowerPC + +// Primarily used for loading constants +// This should really move to be in macro-assembler as it +// is really a pseudo instruction +// Some usages of this intend for a FIXED_SEQUENCE to be used +// Todo - break this dependency so we can optimize mov() in general +// and only use the generic version when we require a fixed sequence +void Assembler::mov(Register dst, const Operand& src) { + BlockTrampolinePoolScope block_trampoline_pool(this); + if (src.rmode_ != RelocInfo::NONE) { + // some form of relocation needed + RecordRelocInfo(src.rmode_, src.imm_); + } + +#if V8_TARGET_ARCH_PPC64 + int64_t value = src.immediate(); + int32_t hi_32 = static_cast(value) >> 32; + int32_t lo_32 = static_cast(value); + int hi_word = static_cast(hi_32) >> 16; + int lo_word = static_cast(hi_32) & 0xFFFF; + lis(dst, Operand(SIGN_EXT_IMM16(hi_word))); + ori(dst, dst, Operand(lo_word)); + sldi(dst, dst, Operand(32)); + hi_word = (static_cast(lo_32) >> 16) & 0xFFFF; + lo_word = static_cast(lo_32) & 0xFFFF; + oris(dst, dst, Operand(hi_word)); + ori(dst, dst, Operand(lo_word)); +#else + int value = src.immediate(); + if (!is_trampoline_pool_blocked()) { + if (is_int16(value)) { + li(dst, Operand(value)); + return; + } + } + int hi_word = static_cast(value) >> 16; + int lo_word = static_cast(value) & 0XFFFF; + + lis(dst, Operand(SIGN_EXT_IMM16(hi_word))); + if ((!is_trampoline_pool_blocked()) && (lo_word == 0)) { + return; + } + ori(dst, dst, Operand(lo_word)); +#endif +} + +// Special register instructions +void Assembler::crxor(int bt, int ba, int bb) { + emit(EXT1 | CRXOR | bt*B21 | ba*B16 | bb*B11); +} + +void Assembler::mflr(Register dst) { + emit(EXT2 | MFSPR | dst.code()*B21 | 256 << 11); // Ignore RC bit +} + +void Assembler::mtlr(Register src) { + emit(EXT2 | MTSPR | src.code()*B21 | 256 << 11); // Ignore RC bit +} + +void Assembler::mtctr(Register src) { + emit(EXT2 | MTSPR | src.code()*B21 | 288 << 11); // Ignore RC bit +} + +void Assembler::mtxer(Register src) { + emit(EXT2 | MTSPR | src.code()*B21 | 32 << 11); +} + +void Assembler::mcrfs(int bf, int bfa) { + emit(EXT4 | MCRFS | bf*B23 | bfa*B18); +} + +void Assembler::mfcr(Register dst) { + emit(EXT2 | MFCR | dst.code()*B21); +} + +// end PowerPC + +// Exception-generating instructions and debugging support. +// Stops with a non-negative code less than kNumOfWatchedStops support +// enabling/disabling and a counter feature. See simulator-ppc.h . 
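+// Illustrative call site (hypothetical -- not emitted anywhere in this patch;
+// the __ shorthand is the usual ACCESS_MASM macro):
+//   __ cmpi(r3, Operand::Zero());
+//   __ stop("unexpected zero in r3", eq, 0, cr7);  // bkpt only when r3 == 0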
+void Assembler::stop(const char* msg, Condition cond, int32_t code, + CRegister cr) { + if (cond != al) { + Label skip; + b(NegateCondition(cond), &skip, cr); + bkpt(0); + bind(&skip); + } else { + bkpt(0); + } +} + +void Assembler::bkpt(uint32_t imm16) { + emit(0x7d821008); +} + + +void Assembler::info(const char* msg, Condition cond, int32_t code, + CRegister cr) { + if (::v8::internal::FLAG_trace_sim_stubs) { + emit(0x7d9ff808); +#if V8_TARGET_ARCH_PPC64 + uint64_t value = reinterpret_cast(msg); + emit(static_cast(value >> 32)); + emit(static_cast(value & 0xFFFFFFFF)); +#else + emit(reinterpret_cast(msg)); +#endif + } +} + +void Assembler::dcbf(Register ra, Register rb) { + emit(EXT2 | DCBF | ra.code()*B16 | rb.code()*B11); +} + +void Assembler::sync() { + emit(EXT2 | SYNC); +} + +void Assembler::icbi(Register ra, Register rb) { + emit(EXT2 | ICBI | ra.code()*B16 | rb.code()*B11); +} + +void Assembler::isync() { + emit(EXT1 | ISYNC); +} + +// Floating point support + +void Assembler::lfd(const DwVfpRegister frt, const MemOperand &src) { + int offset = src.offset(); + Register ra = src.ra(); + ASSERT(is_int16(offset)); + int imm16 = offset & kImm16Mask; + // could be x_form instruction with some casting magic + emit(LFD | frt.code()*B21 | ra.code()*B16 | imm16); +} + +void Assembler::lfdu(const DwVfpRegister frt, const MemOperand &src) { + int offset = src.offset(); + Register ra = src.ra(); + ASSERT(is_int16(offset)); + int imm16 = offset & kImm16Mask; + // could be x_form instruction with some casting magic + emit(LFDU | frt.code()*B21 | ra.code()*B16 | imm16); +} + +void Assembler::lfdx(const DwVfpRegister frt, const MemOperand &src) { + Register ra = src.ra(); + Register rb = src.rb(); + ASSERT(!ra.is(r0)); + emit(EXT2 | LFDX | frt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); +} + +void Assembler::lfdux(const DwVfpRegister frt, const MemOperand & src) { + Register ra = src.ra(); + Register rb = src.rb(); + ASSERT(!ra.is(r0)); + emit(EXT2 | LFDUX | frt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); +} + +void Assembler::lfs(const DwVfpRegister frt, const MemOperand &src) { + int offset = src.offset(); + Register ra = src.ra(); + ASSERT(is_int16(offset)); + ASSERT(!ra.is(r0)); + int imm16 = offset & kImm16Mask; + // could be x_form instruction with some casting magic + emit(LFS | frt.code()*B21 | ra.code()*B16 | imm16); +} + +void Assembler::lfsu(const DwVfpRegister frt, const MemOperand &src) { + int offset = src.offset(); + Register ra = src.ra(); + ASSERT(is_int16(offset)); + ASSERT(!ra.is(r0)); + int imm16 = offset & kImm16Mask; + // could be x_form instruction with some casting magic + emit(LFSU | frt.code()*B21 | ra.code()*B16 | imm16); +} + +void Assembler::lfsx(const DwVfpRegister frt, const MemOperand &src) { + Register ra = src.ra(); + Register rb = src.rb(); + ASSERT(!ra.is(r0)); + emit(EXT2 | LFSX | frt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); +} + +void Assembler::lfsux(const DwVfpRegister frt, const MemOperand & src) { + Register ra = src.ra(); + Register rb = src.rb(); + ASSERT(!ra.is(r0)); + emit(EXT2 | LFSUX | frt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); +} + +void Assembler::stfd(const DwVfpRegister frs, const MemOperand &src) { + int offset = src.offset(); + Register ra = src.ra(); + ASSERT(is_int16(offset)); + ASSERT(!ra.is(r0)); + int imm16 = offset & kImm16Mask; + // could be x_form instruction with some casting magic + emit(STFD | frs.code()*B21 | ra.code()*B16 | imm16); +} + +void Assembler::stfdu(const 
DwVfpRegister frs, const MemOperand &src) { + int offset = src.offset(); + Register ra = src.ra(); + ASSERT(is_int16(offset)); + ASSERT(!ra.is(r0)); + int imm16 = offset & kImm16Mask; + // could be x_form instruction with some casting magic + emit(STFDU | frs.code()*B21 | ra.code()*B16 | imm16); +} + +void Assembler::stfdx(const DwVfpRegister frs, const MemOperand &src) { + Register ra = src.ra(); + Register rb = src.rb(); + ASSERT(!ra.is(r0)); + emit(EXT2 | STFDX | frs.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); +} + +void Assembler::stfdux(const DwVfpRegister frs, const MemOperand &src) { + Register ra = src.ra(); + Register rb = src.rb(); + ASSERT(!ra.is(r0)); + emit(EXT2 | STFDUX | frs.code()*B21 | ra.code()*B16 | rb.code()*B11 |LeaveRC); +} + +void Assembler::stfs(const DwVfpRegister frs, const MemOperand &src) { + int offset = src.offset(); + Register ra = src.ra(); + ASSERT(is_int16(offset)); + ASSERT(!ra.is(r0)); + int imm16 = offset & kImm16Mask; + // could be x_form instruction with some casting magic + emit(STFS | frs.code()*B21 | ra.code()*B16 | imm16); +} + +void Assembler::stfsu(const DwVfpRegister frs, const MemOperand &src) { + int offset = src.offset(); + Register ra = src.ra(); + ASSERT(is_int16(offset)); + ASSERT(!ra.is(r0)); + int imm16 = offset & kImm16Mask; + // could be x_form instruction with some casting magic + emit(STFSU | frs.code()*B21 | ra.code()*B16 | imm16); +} + +void Assembler::stfsx(const DwVfpRegister frs, const MemOperand &src) { + Register ra = src.ra(); + Register rb = src.rb(); + ASSERT(!ra.is(r0)); + emit(EXT2 | STFSX | frs.code()*B21 | ra.code()*B16 | rb.code()*B11 |LeaveRC); +} + +void Assembler::stfsux(const DwVfpRegister frs, const MemOperand &src) { + Register ra = src.ra(); + Register rb = src.rb(); + ASSERT(!ra.is(r0)); + emit(EXT2 | STFSUX | frs.code()*B21 | ra.code()*B16 | rb.code()*B11 |LeaveRC); +} + +void Assembler::fsub(const DwVfpRegister frt, + const DwVfpRegister fra, + const DwVfpRegister frb, + RCBit rc) { + a_form(EXT4 | FSUB, frt, fra, frb, rc); +} + +void Assembler::fadd(const DwVfpRegister frt, + const DwVfpRegister fra, + const DwVfpRegister frb, + RCBit rc) { + a_form(EXT4 | FADD, frt, fra, frb, rc); +} +void Assembler::fmul(const DwVfpRegister frt, + const DwVfpRegister fra, + const DwVfpRegister frc, + RCBit rc) { + emit(EXT4 | FMUL | frt.code()*B21 | fra.code()*B16 | frc.code()*B6 | rc); +} +void Assembler::fdiv(const DwVfpRegister frt, + const DwVfpRegister fra, + const DwVfpRegister frb, + RCBit rc) { + a_form(EXT4 | FDIV, frt, fra, frb, rc); +} + +void Assembler::fcmpu(const DwVfpRegister fra, + const DwVfpRegister frb, + CRegister cr) { + ASSERT(cr.code() >= 0 && cr.code() <= 7); + emit(EXT4 | FCMPU | cr.code()*B23 | fra.code()*B16 | frb.code()*B11); +} + +void Assembler::fmr(const DwVfpRegister frt, + const DwVfpRegister frb, + RCBit rc) { + emit(EXT4 | FMR | frt.code()*B21 | frb.code()*B11 | rc); +} + +void Assembler::fctiwz(const DwVfpRegister frt, + const DwVfpRegister frb) { + emit(EXT4 | FCTIWZ | frt.code()*B21 | frb.code()*B11); +} + +void Assembler::fctiw(const DwVfpRegister frt, + const DwVfpRegister frb) { + emit(EXT4 | FCTIW | frt.code()*B21 | frb.code()*B11); +} + +void Assembler::frim(const DwVfpRegister frt, + const DwVfpRegister frb) { + emit(EXT4 | FRIM | frt.code()*B21 | frb.code()*B11); +} + +void Assembler::frsp(const DwVfpRegister frt, + const DwVfpRegister frb, + RCBit rc) { + emit(EXT4 | FRSP | frt.code()*B21 | frb.code()*B11 | rc); +} + +void Assembler::fcfid(const DwVfpRegister frt, + 
const DwVfpRegister frb, + RCBit rc) { + emit(EXT4 | FCFID | frt.code()*B21 | frb.code()*B11 | rc); +} + +void Assembler::fctid(const DwVfpRegister frt, + const DwVfpRegister frb, + RCBit rc) { + emit(EXT4 | FCTID | frt.code()*B21 | frb.code()*B11 | rc); +} + +void Assembler::fctidz(const DwVfpRegister frt, + const DwVfpRegister frb, + RCBit rc) { + emit(EXT4 | FCTIDZ | frt.code()*B21 | frb.code()*B11 | rc); +} + +void Assembler::fsel(const DwVfpRegister frt, const DwVfpRegister fra, + const DwVfpRegister frc, const DwVfpRegister frb, + RCBit rc) { + emit(EXT4 | FSEL | frt.code()*B21 | fra.code()*B16 | frb.code()*B11 | + frc.code()*B6 | rc); +} + +void Assembler::fneg(const DwVfpRegister frt, + const DwVfpRegister frb, + RCBit rc) { + emit(EXT4 | FNEG | frt.code()*B21 | frb.code()*B11 | rc); +} + +void Assembler::mtfsfi(int bf, int immediate, RCBit rc) { + emit(EXT4 | MTFSFI | bf*B23 | immediate*B12 | rc); +} + +void Assembler::mffs(const DwVfpRegister frt, RCBit rc) { + emit(EXT4 | MFFS | frt.code()*B21 | rc); +} + +void Assembler::mtfsf(const DwVfpRegister frb, bool L, + int FLM, bool W, RCBit rc) { + emit(EXT4 | MTFSF | frb.code()*B11 | W*B16 | FLM*B17 | L*B25 | rc); +} + +void Assembler::fsqrt(const DwVfpRegister frt, + const DwVfpRegister frb, + RCBit rc) { + emit(EXT4 | FSQRT | frt.code()*B21 | frb.code()*B11 | rc); +} + +void Assembler::fabs(const DwVfpRegister frt, + const DwVfpRegister frb, + RCBit rc) { + emit(EXT4 | FABS | frt.code()*B21 | frb.code()*B11 | rc); +} + +// Pseudo instructions. +void Assembler::nop(int type) { + switch (type) { + case 0: + ori(r0, r0, Operand::Zero()); + break; + case DEBUG_BREAK_NOP: + ori(r3, r3, Operand::Zero()); + break; + default: + UNIMPLEMENTED(); + } +} + + +bool Assembler::IsNop(Instr instr, int type) { + ASSERT((0 == type) || (DEBUG_BREAK_NOP == type)); + int reg = 0; + if (DEBUG_BREAK_NOP == type) { + reg = 3; + } + return instr == (ORI | reg*B21 | reg*B16); +} + +// Debugging. +void Assembler::RecordJSReturn() { + positions_recorder()->WriteRecordedPositions(); + CheckBuffer(); + RecordRelocInfo(RelocInfo::JS_RETURN); +} + + +void Assembler::RecordDebugBreakSlot() { + positions_recorder()->WriteRecordedPositions(); + CheckBuffer(); + RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT); +} + + +void Assembler::RecordComment(const char* msg) { + if (FLAG_code_comments) { + CheckBuffer(); + RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast(msg)); + } +} + + +void Assembler::GrowBuffer() { + if (!own_buffer_) FATAL("external code buffer is too small"); + + // Compute new buffer size. + CodeDesc desc; // the new buffer + if (buffer_size_ < 4*KB) { + desc.buffer_size = 4*KB; + } else if (buffer_size_ < 1*MB) { + desc.buffer_size = 2*buffer_size_; + } else { + desc.buffer_size = buffer_size_ + 1*MB; + } + CHECK_GT(desc.buffer_size, 0); // no overflow + + // Set up new buffer. + desc.buffer = NewArray(desc.buffer_size); + + desc.instr_size = pc_offset(); + desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos(); + + // Copy the data. + intptr_t pc_delta = desc.buffer - buffer_; + intptr_t rc_delta = (desc.buffer + desc.buffer_size) - + (buffer_ + buffer_size_); + memmove(desc.buffer, buffer_, desc.instr_size); + memmove(reloc_info_writer.pos() + rc_delta, + reloc_info_writer.pos(), desc.reloc_size); + + // Switch buffers. 
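+ // The old buffer is released and the assembler adopts desc.buffer; pc_ and
+ // the relocation writer are rebased using the pc_delta / rc_delta values
+ // computed above.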
+ DeleteArray(buffer_); + buffer_ = desc.buffer; + buffer_size_ = desc.buffer_size; + pc_ += pc_delta; + reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, + reloc_info_writer.last_pc() + pc_delta); + + // None of our relocation types are pc relative pointing outside the code + // buffer nor pc absolute pointing inside the code buffer, so there is no need + // to relocate any emitted relocation entries. + +#if ABI_USES_FUNCTION_DESCRIPTORS + // Relocate runtime entries. + for (RelocIterator it(desc); !it.done(); it.next()) { + RelocInfo::Mode rmode = it.rinfo()->rmode(); + if (rmode == RelocInfo::INTERNAL_REFERENCE) { + intptr_t* p = reinterpret_cast(it.rinfo()->pc()); + if (*p != 0) { // 0 means uninitialized. + *p += pc_delta; + } + } + } +#endif +} + + +void Assembler::db(uint8_t data) { + CheckBuffer(); + *reinterpret_cast(pc_) = data; + pc_ += sizeof(uint8_t); +} + + +void Assembler::dd(uint32_t data) { + CheckBuffer(); + *reinterpret_cast(pc_) = data; + pc_ += sizeof(uint32_t); +} + + +void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { + RelocInfo rinfo(pc_, rmode, data, NULL); + if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) { + // Adjust code for new modes. + ASSERT(RelocInfo::IsDebugBreakSlot(rmode) + || RelocInfo::IsJSReturn(rmode) + || RelocInfo::IsComment(rmode) + || RelocInfo::IsPosition(rmode)); + } + if (rinfo.rmode() != RelocInfo::NONE) { + // Don't record external references unless the heap will be serialized. + if (rmode == RelocInfo::EXTERNAL_REFERENCE) { +#ifdef DEBUG + if (!Serializer::enabled()) { + Serializer::TooLateToEnableNow(); + } +#endif + if (!Serializer::enabled() && !emit_debug_code()) { + return; + } + } + ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here + if (rmode == RelocInfo::CODE_TARGET_WITH_ID) { + RelocInfo reloc_info_with_ast_id(pc_, + rmode, + RecordedAstId().ToInt(), + NULL); + ClearRecordedAstId(); + reloc_info_writer.Write(&reloc_info_with_ast_id); + } else { + reloc_info_writer.Write(&rinfo); + } + } +} + + +void Assembler::BlockTrampolinePoolFor(int instructions) { + BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize); +} + + +void Assembler::CheckTrampolinePool() { + // Some small sequences of instructions must not be broken up by the + // insertion of a trampoline pool; such sequences are protected by setting + // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_, + // which are both checked here. Also, recursive calls to CheckTrampolinePool + // are blocked by trampoline_pool_blocked_nesting_. + if ((trampoline_pool_blocked_nesting_ > 0) || + (pc_offset() < no_trampoline_pool_before_)) { + // Emission is currently blocked; make sure we try again as soon as + // possible. + if (trampoline_pool_blocked_nesting_ > 0) { + next_buffer_check_ = pc_offset() + kInstrSize; + } else { + next_buffer_check_ = no_trampoline_pool_before_; + } + return; + } + + ASSERT(!trampoline_emitted_); + ASSERT(unbound_labels_count_ >= 0); + if (unbound_labels_count_ > 0) { + // First we emit jump, then we emit trampoline pool. + { BlockTrampolinePoolScope block_trampoline_pool(this); + Label after_pool; + b(&after_pool); + + int pool_start = pc_offset(); + for (int i = 0; i < unbound_labels_count_; i++) { + b(&after_pool); + } + bind(&after_pool); + trampoline_ = Trampoline(pool_start, unbound_labels_count_); + + trampoline_emitted_ = true; + // As we are only going to emit trampoline once, we need to prevent any + // further emission. 
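+ // (kMaxInt pushes the next check beyond any reachable pc offset, so the
+ // pool-emission path is not entered again for this Assembler.)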
+ next_buffer_check_ = kMaxInt; + } + } else { + // Number of branches to unbound label at this point is zero, so we can + // move next buffer check to maximum. + next_buffer_check_ = pc_offset() + + kMaxCondBranchReach - kMaxBlockTrampolineSectionSize; + } + return; +} + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_PPC diff --git a/src/ppc/assembler-ppc.h b/src/ppc/assembler-ppc.h new file mode 100644 index 0000000..95bc342 --- /dev/null +++ b/src/ppc/assembler-ppc.h @@ -0,0 +1,1382 @@ +// Copyright (c) 1994-2006 Sun Microsystems Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// - Redistribution in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the +// distribution. +// +// - Neither the name of Sun Microsystems or the names of contributors may +// be used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +// OF THE POSSIBILITY OF SUCH DAMAGE. + +// The original source code covered by the above license above has been +// modified significantly by Google Inc. +// Copyright 2012 the V8 project authors. All rights reserved. + +// +// Copyright IBM Corp. 2012, 2013. All rights reserved. +// + +// A light-weight PPC Assembler +// Generates user mode instructions for the PPC architecture up + +#ifndef V8_PPC_ASSEMBLER_PPC_H_ +#define V8_PPC_ASSEMBLER_PPC_H_ +#include +#if !defined(_AIX) +#include +#include +#include +#endif +#include "assembler.h" +#include "constants-ppc.h" +#include "serialize.h" + +#define ABI_USES_FUNCTION_DESCRIPTORS \ + (V8_HOST_ARCH_PPC && \ + (defined(_AIX) || \ + (defined(V8_TARGET_ARCH_PPC64) && (__BYTE_ORDER != __LITTLE_ENDIAN)))) + +#define ABI_PASSES_HANDLES_IN_REGS \ + (!V8_HOST_ARCH_PPC || defined(_AIX) || defined(V8_TARGET_ARCH_PPC64)) + +#define ABI_RETURNS_HANDLES_IN_REGS \ + (!V8_HOST_ARCH_PPC || (__BYTE_ORDER == __LITTLE_ENDIAN)) + +#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS \ + (!V8_HOST_ARCH_PPC || (__BYTE_ORDER == __LITTLE_ENDIAN)) + +#define ABI_TOC_ADDRESSABILITY_VIA_IP \ + (V8_HOST_ARCH_PPC && defined(V8_TARGET_ARCH_PPC64) && \ + (__BYTE_ORDER == __LITTLE_ENDIAN)) + +namespace v8 { +namespace internal { + +// CPU Registers. +// +// 1) We would prefer to use an enum, but enum values are assignment- +// compatible with int, which has caused code-generation bugs. 
+// +// 2) We would prefer to use a class instead of a struct but we don't like +// the register initialization to depend on the particular initialization +// order (which appears to be different on OS X, Linux, and Windows for the +// installed versions of C++ we tried). Using a struct permits C-style +// "initialization". Also, the Register objects cannot be const as this +// forces initialization stubs in MSVC, making us dependent on initialization +// order. +// +// 3) By not using an enum, we are possibly preventing the compiler from +// doing certain constant folds, which may significantly reduce the +// code generated for some assembly instructions (because they boil down +// to a few constants). If this is a problem, we could change the code +// such that we use an enum in optimized mode, and the struct in debug +// mode. This way we get the compile-time error checking in debug mode +// and best performance in optimized code. + +// Core register +struct Register { + static const int kNumRegisters = 32; + static const int kNumAllocatableRegisters = 8; // r3-r10 + static const int kSizeInBytes = 4; + + static int ToAllocationIndex(Register reg) { + int index = reg.code() - 3; // r0-r2 are skipped + ASSERT(index < kNumAllocatableRegisters); + return index; + } + + static Register FromAllocationIndex(int index) { + ASSERT(index >= 0 && index < kNumAllocatableRegisters); + return from_code(index + 3); // r0-r2 are skipped + } + + static const char* AllocationIndexToString(int index) { + ASSERT(index >= 0 && index < kNumAllocatableRegisters); + const char* const names[] = { + "r3", + "r4", + "r5", + "r6", + "r7", + "r8", + "r9", + "r10", // currently last allocated register + "r11", // lithium scratch + "r12", // ip + "r13", + "r14", + "r15", + "r16", + "r17", + "r18", + "r19", + "r20", + "r21", + "r22", + "r23", + "r24", + "r25", + "r26", + "r27", + "r28", + "r29", + "r30", + }; + return names[index]; + } + + static Register from_code(int code) { + Register r = { code }; + return r; + } + + bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; } + bool is(Register reg) const { return code_ == reg.code_; } + int code() const { + ASSERT(is_valid()); + return code_; + } + int bit() const { + ASSERT(is_valid()); + return 1 << code_; + } + + void set_code(int code) { + code_ = code; + ASSERT(is_valid()); + } + + // Unfortunately we can't make this private in a struct. 
+ int code_; +}; + +// These constants are used in several locations, including static initializers +const int kRegister_no_reg_Code = -1; +const int kRegister_r0_Code = 0; +const int kRegister_sp_Code = 1; // todo - rename to SP +const int kRegister_r2_Code = 2; // special on PowerPC +const int kRegister_r3_Code = 3; +const int kRegister_r4_Code = 4; +const int kRegister_r5_Code = 5; +const int kRegister_r6_Code = 6; +const int kRegister_r7_Code = 7; +const int kRegister_r8_Code = 8; +const int kRegister_r9_Code = 9; +const int kRegister_r10_Code = 10; +const int kRegister_r11_Code = 11; +const int kRegister_ip_Code = 12; // todo - fix +const int kRegister_r13_Code = 13; +const int kRegister_r14_Code = 14; +const int kRegister_r15_Code = 15; + +const int kRegister_r16_Code = 16; +const int kRegister_r17_Code = 17; +const int kRegister_r18_Code = 18; +const int kRegister_r19_Code = 19; +const int kRegister_r20_Code = 20; +const int kRegister_r21_Code = 21; +const int kRegister_r22_Code = 22; +const int kRegister_r23_Code = 23; +const int kRegister_r24_Code = 24; +const int kRegister_r25_Code = 25; +const int kRegister_r26_Code = 26; +const int kRegister_r27_Code = 27; +const int kRegister_r28_Code = 28; +const int kRegister_r29_Code = 29; +const int kRegister_r30_Code = 30; +const int kRegister_fp_Code = 31; + +const Register no_reg = { kRegister_no_reg_Code }; + +const Register r0 = { kRegister_r0_Code }; +const Register sp = { kRegister_sp_Code }; +const Register r2 = { kRegister_r2_Code }; +const Register r3 = { kRegister_r3_Code }; +const Register r4 = { kRegister_r4_Code }; +const Register r5 = { kRegister_r5_Code }; +const Register r6 = { kRegister_r6_Code }; +const Register r7 = { kRegister_r7_Code }; +const Register r8 = { kRegister_r8_Code }; +const Register r9 = { kRegister_r9_Code }; +const Register r10 = { kRegister_r10_Code }; +// Used as lithium codegen scratch register. +const Register r11 = { kRegister_r11_Code }; +const Register ip = { kRegister_ip_Code }; +// Used as roots register. +const Register r13 = { kRegister_r13_Code }; +const Register r14 = { kRegister_r14_Code }; +const Register r15 = { kRegister_r15_Code }; + +const Register r16 = { kRegister_r16_Code }; +const Register r17 = { kRegister_r17_Code }; +const Register r18 = { kRegister_r18_Code }; +const Register r19 = { kRegister_r19_Code }; +// Used as context register. +const Register r20 = { kRegister_r20_Code }; +const Register r21 = { kRegister_r21_Code }; +const Register r22 = { kRegister_r22_Code }; +const Register r23 = { kRegister_r23_Code }; +const Register r24 = { kRegister_r24_Code }; +const Register r25 = { kRegister_r25_Code }; +const Register r26 = { kRegister_r26_Code }; +const Register r27 = { kRegister_r27_Code }; +const Register r28 = { kRegister_r28_Code }; +const Register r29 = { kRegister_r29_Code }; +const Register r30 = { kRegister_r30_Code }; +const Register fp = { kRegister_fp_Code }; + +// Double word FP register. 
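+// (Naming note: the DwVfpRegister type and the d0-d31 aliases below appear to
+// be carried over from the ARM port; on PowerPC they denote the 32
+// double-precision floating point registers.)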
+struct DwVfpRegister { + static const int kNumRegisters = 32; + static const int kNumVolatileRegisters = 14; // d0-d13 + static const int kNumAllocatableRegisters = 12; // d1-d12 + + inline static int ToAllocationIndex(DwVfpRegister reg); + + static DwVfpRegister FromAllocationIndex(int index) { + ASSERT(index >= 0 && index < kNumAllocatableRegisters); + return from_code(index + 1); // d0 is skipped + } + + static const char* AllocationIndexToString(int index) { + ASSERT(index >= 0 && index < kNumAllocatableRegisters); + const char* const names[] = { + "d1", + "d2", + "d3", + "d4", + "d5", + "d6", + "d7", + "d8", + "d9", + "d10", + "d11", + "d12", + }; + return names[index]; + } + + static DwVfpRegister from_code(int code) { + DwVfpRegister r = { code }; + return r; + } + + // Supporting d0 to d15, can be later extended to d31. + bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; } + bool is(DwVfpRegister reg) const { return code_ == reg.code_; } + + int code() const { + ASSERT(is_valid()); + return code_; + } + int bit() const { + ASSERT(is_valid()); + return 1 << code_; + } + void split_code(int* vm, int* m) const { + ASSERT(is_valid()); + *m = (code_ & 0x10) >> 4; + *vm = code_ & 0x0F; + } + + int code_; +}; + + +typedef DwVfpRegister DoubleRegister; + +const DwVfpRegister no_dreg = { -1 }; +const DwVfpRegister d0 = { 0 }; +const DwVfpRegister d1 = { 1 }; +const DwVfpRegister d2 = { 2 }; +const DwVfpRegister d3 = { 3 }; +const DwVfpRegister d4 = { 4 }; +const DwVfpRegister d5 = { 5 }; +const DwVfpRegister d6 = { 6 }; +const DwVfpRegister d7 = { 7 }; +const DwVfpRegister d8 = { 8 }; +const DwVfpRegister d9 = { 9 }; +const DwVfpRegister d10 = { 10 }; +const DwVfpRegister d11 = { 11 }; +const DwVfpRegister d12 = { 12 }; +const DwVfpRegister d13 = { 13 }; +const DwVfpRegister d14 = { 14 }; +const DwVfpRegister d15 = { 15 }; +const DwVfpRegister d16 = { 16 }; +const DwVfpRegister d17 = { 17 }; +const DwVfpRegister d18 = { 18 }; +const DwVfpRegister d19 = { 19 }; +const DwVfpRegister d20 = { 20 }; +const DwVfpRegister d21 = { 21 }; +const DwVfpRegister d22 = { 22 }; +const DwVfpRegister d23 = { 23 }; +const DwVfpRegister d24 = { 24 }; +const DwVfpRegister d25 = { 25 }; +const DwVfpRegister d26 = { 26 }; +const DwVfpRegister d27 = { 27 }; +const DwVfpRegister d28 = { 28 }; +const DwVfpRegister d29 = { 29 }; +const DwVfpRegister d30 = { 30 }; +const DwVfpRegister d31 = { 31 }; + +// Aliases for double registers. Defined using #define instead of +// "static const DwVfpRegister&" because Clang complains otherwise when a +// compilation unit that includes this header doesn't use the variables. +#define kFirstCalleeSavedDoubleReg d14 +#define kLastCalleeSavedDoubleReg d31 +#define kDoubleRegZero d14 +#define kScratchDoubleReg d13 + +Register ToRegister(int num); + +// Coprocessor register +struct CRegister { + bool is_valid() const { return 0 <= code_ && code_ < 16; } + bool is(CRegister creg) const { return code_ == creg.code_; } + int code() const { + ASSERT(is_valid()); + return code_; + } + int bit() const { + ASSERT(is_valid()); + return 1 << code_; + } + + // Unfortunately we can't make this private in a struct. 
+ int code_; +}; + + +const CRegister no_creg = { -1 }; + +const CRegister cr0 = { 0 }; +const CRegister cr1 = { 1 }; +const CRegister cr2 = { 2 }; +const CRegister cr3 = { 3 }; +const CRegister cr4 = { 4 }; +const CRegister cr5 = { 5 }; +const CRegister cr6 = { 6 }; +const CRegister cr7 = { 7 }; +const CRegister cr8 = { 8 }; +const CRegister cr9 = { 9 }; +const CRegister cr10 = { 10 }; +const CRegister cr11 = { 11 }; +const CRegister cr12 = { 12 }; +const CRegister cr13 = { 13 }; +const CRegister cr14 = { 14 }; +const CRegister cr15 = { 15 }; + +// ----------------------------------------------------------------------------- +// Machine instruction Operands + +// Class Operand represents a shifter operand in data processing instructions +class Operand BASE_EMBEDDED { + public: + // immediate + INLINE(explicit Operand(intptr_t immediate, + RelocInfo::Mode rmode = RelocInfo::NONE)); + INLINE(static Operand Zero()) { + return Operand(static_cast(0)); + } + INLINE(explicit Operand(const ExternalReference& f)); + explicit Operand(Handle handle); + INLINE(explicit Operand(Smi* value)); + + // rm + INLINE(explicit Operand(Register rm)); + + // Return true if this is a register operand. + INLINE(bool is_reg() const); + + inline intptr_t immediate() const { + ASSERT(!rm_.is_valid()); + return imm_; + } + + Register rm() const { return rm_; } + + private: + Register rm_; + intptr_t imm_; // valid if rm_ == no_reg + RelocInfo::Mode rmode_; + + friend class Assembler; + friend class MacroAssembler; +}; + + +// Class MemOperand represents a memory operand in load and store instructions +// On PowerPC we have base register + 16bit signed value +// Alternatively we can have a 16bit signed value immediate +class MemOperand BASE_EMBEDDED { + public: + explicit MemOperand(Register rn, int32_t offset = 0); + + explicit MemOperand(Register ra, Register rb); + + int32_t offset() const { + ASSERT(rb_.is(no_reg)); + return offset_; + } + + // PowerPC - base register + Register ra() const { + ASSERT(!ra_.is(no_reg)); + return ra_; + } + + Register rb() const { + ASSERT(offset_ == 0 && !rb_.is(no_reg)); + return rb_; + } + + private: + Register ra_; // base + int32_t offset_; // offset + Register rb_; // index + + friend class Assembler; +}; + +// CpuFeatures keeps track of which features are supported by the target CPU. +// Supported features must be enabled by a Scope before use. +class CpuFeatures : public AllStatic { + public: + // Detect features of the target CPU. Set safe defaults if the serializer + // is enabled (snapshots must be portable). + static void Probe(); + + // Check whether a feature is supported by the target CPU. + static bool IsSupported(CpuFeature f) { + ASSERT(initialized_); + return (supported_ & (1u << f)) != 0; + } + +#ifdef DEBUG + // Check whether a feature is currently enabled. + static bool IsEnabled(CpuFeature f) { + ASSERT(initialized_); + Isolate* isolate = Isolate::UncheckedCurrent(); + if (isolate == NULL) { + // When no isolate is available, work as if we're running in + // release mode. + return IsSupported(f); + } + unsigned enabled = static_cast(isolate->enabled_cpu_features()); + return (enabled & (1u << f)) != 0; + } +#endif + + // Enable a specified feature within a scope. 
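+ // Illustrative usage (the feature name is hypothetical; this mirrors the
+ // pattern documented in the other ports):
+ //   if (CpuFeatures::IsSupported(FPU)) {
+ //     CpuFeatures::Scope scope(FPU);
+ //     // Generation of instructions that require FPU is allowed here.
+ //   }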
+ class Scope BASE_EMBEDDED { +#ifdef DEBUG + + public: + explicit Scope(CpuFeature f) { + unsigned mask = 1u << f; + ASSERT(CpuFeatures::IsSupported(f)); + ASSERT(!Serializer::enabled() || + (CpuFeatures::found_by_runtime_probing_ & mask) == 0); + isolate_ = Isolate::UncheckedCurrent(); + old_enabled_ = 0; + if (isolate_ != NULL) { + old_enabled_ = static_cast(isolate_->enabled_cpu_features()); + isolate_->set_enabled_cpu_features(old_enabled_ | mask); + } + } + ~Scope() { + ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_); + if (isolate_ != NULL) { + isolate_->set_enabled_cpu_features(old_enabled_); + } + } + + private: + Isolate* isolate_; + unsigned old_enabled_; +#else + + public: + explicit Scope(CpuFeature f) {} +#endif + }; + + class TryForceFeatureScope BASE_EMBEDDED { + public: + explicit TryForceFeatureScope(CpuFeature f) + : old_supported_(CpuFeatures::supported_) { + if (CanForce()) { + CpuFeatures::supported_ |= (1u << f); + } + } + + ~TryForceFeatureScope() { + if (CanForce()) { + CpuFeatures::supported_ = old_supported_; + } + } + + private: + static bool CanForce() { + // It's only safe to temporarily force support of CPU features + // when there's only a single isolate, which is guaranteed when + // the serializer is enabled. + return Serializer::enabled(); + } + + const unsigned old_supported_; + }; + + private: +#ifdef DEBUG + static bool initialized_; +#endif + static unsigned supported_; + static unsigned found_by_runtime_probing_; + + DISALLOW_COPY_AND_ASSIGN(CpuFeatures); +}; + + +class Assembler : public AssemblerBase { + public: + // Create an assembler. Instructions and relocation information are emitted + // into a buffer, with the instructions starting from the beginning and the + // relocation information starting from the end of the buffer. See CodeDesc + // for a detailed comment on the layout (globals.h). + // + // If the provided buffer is NULL, the assembler allocates and grows its own + // buffer, and buffer_size determines the initial buffer size. The buffer is + // owned by the assembler and deallocated upon destruction of the assembler. + // + // If the provided buffer is not NULL, the assembler uses the provided buffer + // for code generation and assumes its size to be buffer_size. If the buffer + // is too small, a fatal error occurs. No deallocation of the buffer is done + // upon destruction of the assembler. + Assembler(Isolate* isolate, void* buffer, int buffer_size); + ~Assembler(); + + // Overrides the default provided by FLAG_debug_code. + void set_emit_debug_code(bool value) { emit_debug_code_ = value; } + + // Avoids using instructions that vary in size in unpredictable ways between + // the snapshot and the running VM. This is needed by the full compiler so + // that it can recompile code with debug support and fix the PC. + void set_predictable_code_size(bool value) { predictable_code_size_ = value; } + + // GetCode emits any pending (non-emitted) code and fills the descriptor + // desc. GetCode() is idempotent; it returns the same result if no other + // Assembler functions are invoked in between GetCode() calls. + void GetCode(CodeDesc* desc); + + // Label operations & relative jumps (PPUM Appendix D) + // + // Takes a branch opcode (cc) and a label (L) and generates + // either a backward branch or a forward branch and links it + // to the label fixup chain. 
Usage: + // + // Label L; // unbound label + // j(cc, &L); // forward branch to unbound label + // bind(&L); // bind label to the current pc + // j(cc, &L); // backward branch to bound label + // bind(&L); // illegal: a label may be bound only once + // + // Note: The same Label can be used for forward and backward branches + // but it may be bound only once. + + void bind(Label* L); // binds an unbound label L to the current code position + // Determines if Label is bound and near enough so that a single + // branch instruction can be used to reach it. + bool is_near(Label* L, Condition cond); + + // Returns the branch offset to the given label from the current code position + // Links the label to the current position if it is still unbound + // Manages the jump elimination optimization if the second parameter is true. + int branch_offset(Label* L, bool jump_elimination_allowed); + + // Puts a label's target address at the given position. + // The high 8 bits are set to zero. + void label_at_put(Label* L, int at_offset); + + // Read/Modify the code target address in the branch/call instruction at pc. + INLINE(static Address target_address_at(Address pc)); + INLINE(static void set_target_address_at(Address pc, Address target)); + + // Return the code target address at a call site from the return address + // of that call in the instruction stream. + inline static Address target_address_from_return_address(Address pc); + + // This sets the branch destination. + // This is for calls and branches within generated code. + inline static void deserialization_set_special_target_at( + Address instruction_payload, Address target); + + // Size of an instruction. + static const int kInstrSize = sizeof(Instr); + + // Here we are patching the address in the lis/ori instruction pair. + // These values are used in the serialization process and must be zero for + // PPC platform, as Code, Embedded Object or External-reference pointers + // are split across two consecutive instructions and don't exist separately + // in the code, so the serializer should not step forwards in memory after + // a target is resolved and written. + static const int kSpecialTargetSize = 0; + + // Number of consecutive instructions used to store pointer sized constant. +#if V8_TARGET_ARCH_PPC64 + static const int kInstructionsForPtrConstant = 5; +#else + static const int kInstructionsForPtrConstant = 2; +#endif + + // Distance between the instruction referring to the address of the call + // target and the return address. + + // Call sequence is a FIXED_SEQUENCE: + // lis r8, 2148 @ call address hi + // ori r8, r8, 5728 @ call address lo + // mtlr r8 + // blrl + // @ return address + // in 64bit mode, the address load is a 5 instruction sequence +#if V8_TARGET_ARCH_PPC64 + static const int kCallTargetAddressOffset = 7 * kInstrSize; +#else + static const int kCallTargetAddressOffset = 4 * kInstrSize; +#endif + + // Distance between start of patched return sequence and the emitted address + // to jump to. + // Patched return sequence is a FIXED_SEQUENCE: + // lis r0, <address hi>
+ // ori r0, r0, <address lo>
+ // mtlr r0 + // blrl + static const int kPatchReturnSequenceAddressOffset = 0 * kInstrSize; + + // Distance between start of patched debug break slot and the emitted address + // to jump to. + // Patched debug break slot code is a FIXED_SEQUENCE: + // lis r0, <address hi>
+ // ori r0, r0, <address lo>
+ // mtlr r0 + // blrl + static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize; + + // Difference between address of current opcode and value read from pc + // register. + static const int kPcLoadDelta = 0; // Todo: remove + +#if V8_TARGET_ARCH_PPC64 + static const int kPatchDebugBreakSlotReturnOffset = 7 * kInstrSize; +#else + static const int kPatchDebugBreakSlotReturnOffset = 4 * kInstrSize; +#endif + + // This is the length of the BreakLocationIterator::SetDebugBreakAtReturn() + // code patch FIXED_SEQUENCE +#if V8_TARGET_ARCH_PPC64 + static const int kJSReturnSequenceInstructions = 8; +#else + static const int kJSReturnSequenceInstructions = 5; +#endif + + // This is the length of the code sequence from SetDebugBreakAtSlot() + // FIXED_SEQUENCE +#if V8_TARGET_ARCH_PPC64 + static const int kDebugBreakSlotInstructions = 7; +#else + static const int kDebugBreakSlotInstructions = 4; +#endif + static const int kDebugBreakSlotLength = + kDebugBreakSlotInstructions * kInstrSize; + + static inline int encode_crbit(const CRegister& cr, enum CRBit crbit) { + return ((cr.code() * CRWIDTH) + crbit); + } + + // --------------------------------------------------------------------------- + // Code generation + + // Insert the smallest number of nop instructions + // possible to align the pc offset to a multiple + // of m. m must be a power of 2 (>= 4). + void Align(int m); + // Aligns code to something that's optimal for a jump target for the platform. + void CodeTargetAlign(); + + // Branch instructions + void bclr(BOfield bo, LKBit lk); + void blr(); + void bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk = LeaveLK); + void b(int branch_offset, LKBit lk); + + void bcctr(BOfield bo, LKBit lk); + void bcr(); + + // Convenience branch instructions using labels + void b(Label* L, LKBit lk = LeaveLK) { + b(branch_offset(L, false), lk); + } + + void bc_short(Condition cond, Label* L, CRegister cr = cr7, + LKBit lk = LeaveLK) { + ASSERT(cond != al); + ASSERT(cr.code() >= 0 && cr.code() <= 7); + + int b_offset = branch_offset(L, false); + + switch (cond) { + case eq: + bc(b_offset, BT, encode_crbit(cr, CR_EQ), lk); + break; + case ne: + bc(b_offset, BF, encode_crbit(cr, CR_EQ), lk); + break; + case gt: + bc(b_offset, BT, encode_crbit(cr, CR_GT), lk); + break; + case le: + bc(b_offset, BF, encode_crbit(cr, CR_GT), lk); + break; + case lt: + bc(b_offset, BT, encode_crbit(cr, CR_LT), lk); + break; + case ge: + bc(b_offset, BF, encode_crbit(cr, CR_LT), lk); + break; + case unordered: + bc(b_offset, BT, encode_crbit(cr, CR_FU), lk); + break; + case ordered: + bc(b_offset, BF, encode_crbit(cr, CR_FU), lk); + break; + case overflow: + bc(b_offset, BT, encode_crbit(cr, CR_SO), lk); + break; + case nooverflow: + bc(b_offset, BF, encode_crbit(cr, CR_SO), lk); + break; + default: + UNIMPLEMENTED(); + } + } + + void b(Condition cond, Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { + if (cond == al) { + b(L, lk); + return; + } + + if ((L->is_bound() && is_near(L, cond)) || + !is_trampoline_emitted()) { + bc_short(cond, L, cr, lk); + return; + } + + Label skip; + Condition neg_cond = NegateCondition(cond); + bc_short(neg_cond, &skip, cr); + b(L, lk); + bind(&skip); + } + + void bne(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { + b(ne, L, cr, lk); } + void beq(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { + b(eq, L, cr, lk); } + void blt(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { + b(lt, L, cr, lk); } + void bge(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { + 
b(ge, L, cr, lk); } + void ble(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { + b(le, L, cr, lk); } + void bgt(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { + b(gt, L, cr, lk); } + void bunordered(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { + b(unordered, L, cr, lk); } + void bordered(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { + b(ordered, L, cr, lk); } + void boverflow(Label* L, CRegister cr = cr0, LKBit lk = LeaveLK) { + b(overflow, L, cr, lk); } + void bnooverflow(Label* L, CRegister cr = cr0, LKBit lk = LeaveLK) { + b(nooverflow, L, cr, lk); } + + // Decrement CTR; branch if CTR != 0 + void bdnz(Label* L, LKBit lk = LeaveLK) { + bc(branch_offset(L, false), DCBNZ, 0, lk); + } + + // Data-processing instructions + + // PowerPC + void sub(Register dst, Register src1, Register src2, + OEBit s = LeaveOE, RCBit r = LeaveRC); + + void subfic(Register dst, Register src, const Operand& imm); + + void subfc(Register dst, Register src1, Register src2, + OEBit s = LeaveOE, RCBit r = LeaveRC); + + void add(Register dst, Register src1, Register src2, + OEBit s = LeaveOE, RCBit r = LeaveRC); + + void addc(Register dst, Register src1, Register src2, + OEBit o = LeaveOE, RCBit r = LeaveRC); + + void addze(Register dst, Register src1, OEBit o, RCBit r); + + void mullw(Register dst, Register src1, Register src2, + OEBit o = LeaveOE, RCBit r = LeaveRC); + + void mulhw(Register dst, Register src1, Register src2, + OEBit o = LeaveOE, RCBit r = LeaveRC); + + void divw(Register dst, Register src1, Register src2, + OEBit o = LeaveOE, RCBit r = LeaveRC); + + void addi(Register dst, Register src, const Operand& imm); + void addis(Register dst, Register src, const Operand& imm); + void addic(Register dst, Register src, const Operand& imm); + + void and_(Register dst, Register src1, Register src2, RCBit rc = LeaveRC); + void andc(Register dst, Register src1, Register src2, RCBit rc = LeaveRC); + void andi(Register ra, Register rs, const Operand& imm); + void andis(Register ra, Register rs, const Operand& imm); + void nor(Register dst, Register src1, Register src2, RCBit r = LeaveRC); + void notx(Register dst, Register src, RCBit r = LeaveRC); + void ori(Register dst, Register src, const Operand& imm); + void oris(Register dst, Register src, const Operand& imm); + void orx(Register dst, Register src1, Register src2, RCBit rc = LeaveRC); + void xori(Register dst, Register src, const Operand& imm); + void xoris(Register ra, Register rs, const Operand& imm); + void xor_(Register dst, Register src1, Register src2, RCBit rc = LeaveRC); + void cmpi(Register src1, const Operand& src2, CRegister cr = cr7); + void cmpli(Register src1, const Operand& src2, CRegister cr = cr7); + void li(Register dst, const Operand& src); + void lis(Register dst, const Operand& imm); + void mr(Register dst, Register src); + + void lbz(Register dst, const MemOperand& src); + void lbzx(Register dst, const MemOperand& src); + void lbzux(Register dst, const MemOperand& src); + void lhz(Register dst, const MemOperand& src); + void lhzx(Register dst, const MemOperand& src); + void lhzux(Register dst, const MemOperand& src); + void lwz(Register dst, const MemOperand& src); + void lwzu(Register dst, const MemOperand& src); + void lwzx(Register dst, const MemOperand& src); + void lwzux(Register dst, const MemOperand& src); + void lwa(Register dst, const MemOperand& src); + void stb(Register dst, const MemOperand& src); + void stbx(Register dst, const MemOperand& src); + void stbux(Register dst, const MemOperand& src); + 
void sth(Register dst, const MemOperand& src); + void sthx(Register dst, const MemOperand& src); + void sthux(Register dst, const MemOperand& src); + void stw(Register dst, const MemOperand& src); + void stwu(Register dst, const MemOperand& src); + void stwx(Register rs, const MemOperand& src); + void stwux(Register rs, const MemOperand& src); + + void extsb(Register rs, Register ra, RCBit r = LeaveRC); + void extsh(Register rs, Register ra, RCBit r = LeaveRC); + + void neg(Register rt, Register ra, OEBit o = LeaveOE, RCBit c = LeaveRC); + +#if V8_TARGET_ARCH_PPC64 + void ld(Register rd, const MemOperand &src); + void ldx(Register rd, const MemOperand &src); + void ldu(Register rd, const MemOperand &src); + void ldux(Register rd, const MemOperand &src); + void std(Register rs, const MemOperand &src); + void stdx(Register rs, const MemOperand &src); + void stdu(Register rs, const MemOperand &src); + void stdux(Register rs, const MemOperand &src); + void rldic(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC); + void rldicl(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC); + void rldicr(Register dst, Register src, int sh, int me, RCBit r = LeaveRC); + void rldimi(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC); + void sldi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC); + void srdi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC); + void clrrdi(Register dst, Register src, const Operand& val, + RCBit rc = LeaveRC); + void clrldi(Register dst, Register src, const Operand& val, + RCBit rc = LeaveRC); + void sradi(Register ra, Register rs, int sh, RCBit r = LeaveRC); + void srd(Register dst, Register src1, Register src2, RCBit r = LeaveRC); + void sld(Register dst, Register src1, Register src2, RCBit r = LeaveRC); + void srad(Register dst, Register src1, Register src2, RCBit r = LeaveRC); + void cntlzd_(Register dst, Register src, RCBit rc = LeaveRC); + void extsw(Register rs, Register ra, RCBit r = LeaveRC); + void mulld(Register dst, Register src1, Register src2, + OEBit o = LeaveOE, RCBit r = LeaveRC); + void divd(Register dst, Register src1, Register src2, + OEBit o = LeaveOE, RCBit r = LeaveRC); +#endif + + void rlwinm(Register ra, Register rs, int sh, int mb, int me, + RCBit rc = LeaveRC); + void rlwimi(Register ra, Register rs, int sh, int mb, int me, + RCBit rc = LeaveRC); + void slwi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC); + void srwi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC); + void clrrwi(Register dst, Register src, const Operand& val, + RCBit rc = LeaveRC); + void clrlwi(Register dst, Register src, const Operand& val, + RCBit rc = LeaveRC); + void srawi(Register ra, Register rs, int sh, RCBit r = LeaveRC); + void srw(Register dst, Register src1, Register src2, RCBit r = LeaveRC); + void slw(Register dst, Register src1, Register src2, RCBit r = LeaveRC); + void sraw(Register dst, Register src1, Register src2, RCBit r = LeaveRC); + + void cntlzw_(Register dst, Register src, RCBit rc = LeaveRC); + // end PowerPC + + void subi(Register dst, Register src1, const Operand& src2); + + void cmp(Register src1, Register src2, CRegister cr = cr7); + void cmpl(Register src1, Register src2, CRegister cr = cr7); + + void mov(Register dst, const Operand& src); + + // Multiply instructions + + // PowerPC + void mul(Register dst, Register src1, Register src2, + OEBit s = LeaveOE, RCBit r = LeaveRC); + + // Miscellaneous arithmetic instructions + + // Special register 
access + // PowerPC + void crxor(int bt, int ba, int bb); + void mflr(Register dst); + void mtlr(Register src); + void mtctr(Register src); + void mtxer(Register src); + void mcrfs(int bf, int bfa); + void mfcr(Register dst); + + void fake_asm(enum FAKE_OPCODE_T fopcode); + void marker_asm(int mcode); + void function_descriptor(); + // end PowerPC + + // Exception-generating instructions and debugging support + void stop(const char* msg, + Condition cond = al, + int32_t code = kDefaultStopCode, + CRegister cr = cr7); + + void bkpt(uint32_t imm16); // v5 and above + + // Informational messages when simulating + void info(const char* msg, + Condition cond = al, + int32_t code = kDefaultStopCode, + CRegister cr = cr7); + + void dcbf(Register ra, Register rb); + void sync(); + void icbi(Register ra, Register rb); + void isync(); + + // Support for floating point + void lfd(const DwVfpRegister frt, const MemOperand& src); + void lfdu(const DwVfpRegister frt, const MemOperand& src); + void lfdx(const DwVfpRegister frt, const MemOperand& src); + void lfdux(const DwVfpRegister frt, const MemOperand& src); + void lfs(const DwVfpRegister frt, const MemOperand& src); + void lfsu(const DwVfpRegister frt, const MemOperand& src); + void lfsx(const DwVfpRegister frt, const MemOperand& src); + void lfsux(const DwVfpRegister frt, const MemOperand& src); + void stfd(const DwVfpRegister frs, const MemOperand& src); + void stfdu(const DwVfpRegister frs, const MemOperand& src); + void stfdx(const DwVfpRegister frs, const MemOperand& src); + void stfdux(const DwVfpRegister frs, const MemOperand& src); + void stfs(const DwVfpRegister frs, const MemOperand& src); + void stfsu(const DwVfpRegister frs, const MemOperand& src); + void stfsx(const DwVfpRegister frs, const MemOperand& src); + void stfsux(const DwVfpRegister frs, const MemOperand& src); + + void fadd(const DwVfpRegister frt, const DwVfpRegister fra, + const DwVfpRegister frb, RCBit rc = LeaveRC); + void fsub(const DwVfpRegister frt, const DwVfpRegister fra, + const DwVfpRegister frb, RCBit rc = LeaveRC); + void fdiv(const DwVfpRegister frt, const DwVfpRegister fra, + const DwVfpRegister frb, RCBit rc = LeaveRC); + void fmul(const DwVfpRegister frt, const DwVfpRegister fra, + const DwVfpRegister frc, RCBit rc = LeaveRC); + void fcmpu(const DwVfpRegister fra, const DwVfpRegister frb, + CRegister cr = cr7); + void fmr(const DwVfpRegister frt, const DwVfpRegister frb, + RCBit rc = LeaveRC); + void fctiwz(const DwVfpRegister frt, const DwVfpRegister frb); + void fctiw(const DwVfpRegister frt, const DwVfpRegister frb); + void frim(const DwVfpRegister frt, const DwVfpRegister frb); + void frsp(const DwVfpRegister frt, const DwVfpRegister frb, + RCBit rc = LeaveRC); + void fcfid(const DwVfpRegister frt, const DwVfpRegister frb, + RCBit rc = LeaveRC); + void fctid(const DwVfpRegister frt, const DwVfpRegister frb, + RCBit rc = LeaveRC); + void fctidz(const DwVfpRegister frt, const DwVfpRegister frb, + RCBit rc = LeaveRC); + void fsel(const DwVfpRegister frt, const DwVfpRegister fra, + const DwVfpRegister frc, const DwVfpRegister frb, + RCBit rc = LeaveRC); + void fneg(const DwVfpRegister frt, const DwVfpRegister frb, + RCBit rc = LeaveRC); + void mtfsfi(int bf, int immediate, RCBit rc = LeaveRC); + void mffs(const DwVfpRegister frt, RCBit rc = LeaveRC); + void mtfsf(const DwVfpRegister frb, bool L = 1, int FLM = 0, bool W = 0, + RCBit rc = LeaveRC); + void fsqrt(const DwVfpRegister frt, const DwVfpRegister frb, + RCBit rc = LeaveRC); + void fabs(const 
DwVfpRegister frt, const DwVfpRegister frb, + RCBit rc = LeaveRC); + + // Pseudo instructions + + // Different nop operations are used by the code generator to detect certain + // states of the generated code. + enum NopMarkerTypes { + NON_MARKING_NOP = 0, + DEBUG_BREAK_NOP, + // IC markers. + PROPERTY_ACCESS_INLINED, + PROPERTY_ACCESS_INLINED_CONTEXT, + PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE, + // Helper values. + LAST_CODE_MARKER, + FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED + }; + + void nop(int type = 0); // 0 is the default non-marking type. + + void push(Register src) { +#if V8_TARGET_ARCH_PPC64 + stdu(src, MemOperand(sp, -8)); +#else + stwu(src, MemOperand(sp, -4)); +#endif + } + + void pop(Register dst) { +#if V8_TARGET_ARCH_PPC64 + ld(dst, MemOperand(sp)); + addi(sp, sp, Operand(8)); +#else + lwz(dst, MemOperand(sp)); + addi(sp, sp, Operand(4)); +#endif + } + + void pop() { + addi(sp, sp, Operand(kPointerSize)); + } + + // Jump unconditionally to given label. + void jmp(Label* L) { b(L); } + + bool predictable_code_size() const { return predictable_code_size_; } + + // Check the code size generated from label to here. + int SizeOfCodeGeneratedSince(Label* label) { + return pc_offset() - label->pos(); + } + + // Check the number of instructions generated from label to here. + int InstructionsGeneratedSince(Label* label) { + return SizeOfCodeGeneratedSince(label) / kInstrSize; + } + + // Class for scoping postponing the trampoline pool generation. + class BlockTrampolinePoolScope { + public: + explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) { + assem_->StartBlockTrampolinePool(); + } + ~BlockTrampolinePoolScope() { + assem_->EndBlockTrampolinePool(); + } + + private: + Assembler* assem_; + + DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope); + }; + + // Debugging + + // Mark address of the ExitJSFrame code. + void RecordJSReturn(); + + // Mark address of a debug break slot. + void RecordDebugBreakSlot(); + + // Record the AST id of the CallIC being compiled, so that it can be placed + // in the relocation information. + void SetRecordedAstId(TypeFeedbackId ast_id) { +// PPC - this shouldn't be failing roohack ASSERT(recorded_ast_id_.IsNone()); + recorded_ast_id_ = ast_id; + } + + TypeFeedbackId RecordedAstId() { + // roohack - another issue??? ASSERT(!recorded_ast_id_.IsNone()); + return recorded_ast_id_; + } + + void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); } + + // Record a comment relocation entry that can be used by a disassembler. + // Use --code-comments to enable. + void RecordComment(const char* msg); + + // Writes a single byte or word of data in the code stream. Used + // for inline tables, e.g., jump-tables. 
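BlockTrampolinePoolScope above is a plain RAII guard: its constructor bumps the assembler's blocking nesting count and its destructor drops it again, so trampoline-pool emission is suppressed for exactly the lifetime of the scope object (useful around FIXED_SEQUENCE emissions that must stay contiguous). A self-contained sketch of the same pattern with a mock assembler (the mock names are illustrative, not V8's):

    #include <cassert>

    class MockAssembler {
     public:
      void StartBlockTrampolinePool() { ++nesting_; }
      void EndBlockTrampolinePool() { --nesting_; }
      bool is_trampoline_pool_blocked() const { return nesting_ > 0; }
     private:
      int nesting_ = 0;
    };

    class BlockScope {
     public:
      explicit BlockScope(MockAssembler* a) : a_(a) { a_->StartBlockTrampolinePool(); }
      ~BlockScope() { a_->EndBlockTrampolinePool(); }
     private:
      MockAssembler* a_;
    };

    int main() {
      MockAssembler a;
      {
        BlockScope scope(&a);   // pool emission blocked from here...
        assert(a.is_trampoline_pool_blocked());
      }                         // ...until the scope is destroyed
      assert(!a.is_trampoline_pool_blocked());
      return 0;
    }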
+ void db(uint8_t data); + void dd(uint32_t data); + + int pc_offset() const { return pc_ - buffer_; } + + PositionsRecorder* positions_recorder() { return &positions_recorder_; } + + // Read/patch instructions + Instr instr_at(int pos) { return *reinterpret_cast(buffer_ + pos); } + void instr_at_put(int pos, Instr instr) { + *reinterpret_cast(buffer_ + pos) = instr; + } + static Instr instr_at(byte* pc) { return *reinterpret_cast(pc); } + static void instr_at_put(byte* pc, Instr instr) { + *reinterpret_cast(pc) = instr; + } + static Condition GetCondition(Instr instr); + + static bool IsLis(Instr instr); + static bool IsAddic(Instr instr); + static bool IsOri(Instr instr); + + static bool IsBranch(Instr instr); + static Register GetRA(Instr instr); + static Register GetRB(Instr instr); +#if V8_TARGET_ARCH_PPC64 + static bool Is64BitLoadIntoR12(Instr instr1, Instr instr2, + Instr instr3, Instr instr4, Instr instr5); +#else + static bool Is32BitLoadIntoR12(Instr instr1, Instr instr2); +#endif + + static bool IsCmpRegister(Instr instr); + static bool IsCmpImmediate(Instr instr); + static bool IsRlwinm(Instr instr); +#if V8_TARGET_ARCH_PPC64 + static bool IsRldicl(Instr instr); +#endif + static Register GetCmpImmediateRegister(Instr instr); + static int GetCmpImmediateRawImmediate(Instr instr); + static bool IsNop(Instr instr, int type = NON_MARKING_NOP); + + // Postpone the generation of the trampoline pool for the specified number of + // instructions. + void BlockTrampolinePoolFor(int instructions); + void CheckTrampolinePool(); + + protected: + // Relocation for a type-recording IC has the AST id added to it. This + // member variable is a way to pass the information from the call site to + // the relocation info. + TypeFeedbackId recorded_ast_id_; + + bool emit_debug_code() const { return emit_debug_code_; } + + int buffer_space() const { return reloc_info_writer.pos() - pc_; } + + // Decode branch instruction at pos and return branch target pos + int target_at(int pos); + + // Patch branch instruction at pos to branch to given branch target pos + void target_at_put(int pos, int target_pos); + + // Record reloc info for current pc_ + void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0); + + // Block the emission of the trampoline pool before pc_offset. + void BlockTrampolinePoolBefore(int pc_offset) { + if (no_trampoline_pool_before_ < pc_offset) + no_trampoline_pool_before_ = pc_offset; + } + + void StartBlockTrampolinePool() { + trampoline_pool_blocked_nesting_++; + } + + void EndBlockTrampolinePool() { + trampoline_pool_blocked_nesting_--; + } + + bool is_trampoline_pool_blocked() const { + return trampoline_pool_blocked_nesting_ > 0; + } + + bool has_exception() const { + return internal_trampoline_exception_; + } + + bool is_trampoline_emitted() const { + return trampoline_emitted_; + } + + + private: + // Code buffer: + // The buffer into which code and relocation info are generated. + byte* buffer_; + int buffer_size_; + // True if the assembler owns the buffer, false if buffer is external. + bool own_buffer_; + + // Code generation + // The relocation writer's position is at least kGap bytes below the end of + // the generated instructions. This is so that multi-instruction sequences do + // not have to check for overflow. The same is true for writes of large + // relocation info entries. + static const int kGap = 32; + byte* pc_; // the program counter; moves forward + + // Repeated checking whether the trampoline pool should be emitted is rather + // expensive. 
By default we only check again once a number of instructions + // has been generated. + int next_buffer_check_; // pc offset of next buffer check. + + // Emission of the trampoline pool may be blocked in some code sequences. + int trampoline_pool_blocked_nesting_; // Block emission if this is not zero. + int no_trampoline_pool_before_; // Block emission before this pc offset. + + // Relocation info generation + // Each relocation is encoded as a variable size value + static const int kMaxRelocSize = RelocInfoWriter::kMaxSize; + RelocInfoWriter reloc_info_writer; + + // The bound position, before this we cannot do instruction elimination. + int last_bound_pos_; + + // Code emission + inline void CheckBuffer(); + void GrowBuffer(); + inline void emit(Instr x); + inline void CheckTrampolinePoolQuick(); + + // Instruction generation + void a_form(Instr instr, DwVfpRegister frt, DwVfpRegister fra, + DwVfpRegister frb, RCBit r); + void d_form(Instr instr, Register rt, Register ra, const intptr_t val, + bool signed_disp); + void x_form(Instr instr, Register ra, Register rs, Register rb, RCBit r); + void xo_form(Instr instr, Register rt, Register ra, Register rb, + OEBit o, RCBit r); + void md_form(Instr instr, Register ra, Register rs, int shift, int maskbit, + RCBit r); + + // Labels + void print(Label* L); + int max_reach_from(int pos); + void bind_to(Label* L, int pos); + void next(Label* L); + + class Trampoline { + public: + Trampoline() { + next_slot_ = 0; + free_slot_count_ = 0; + } + Trampoline(int start, int slot_count) { + next_slot_ = start; + free_slot_count_ = slot_count; + } + int take_slot() { + int trampoline_slot = kInvalidSlotPos; + if (free_slot_count_ <= 0) { + // We have run out of space on trampolines. + // Make sure we fail in debug mode, so we become aware of each case + // when this happens. + ASSERT(0); + // Internal exception will be caught. + } else { + trampoline_slot = next_slot_; + free_slot_count_--; + next_slot_ += kTrampolineSlotsSize; + } + return trampoline_slot; + } + + private: + int next_slot_; + int free_slot_count_; + }; + + int32_t get_trampoline_entry(); + int unbound_labels_count_; + // If trampoline is emitted, generated code is becoming large. As + // this is already a slow case which can possibly break our code + // generation for the extreme case, we use this information to + // trigger different mode of branch instruction generation, where we + // no longer use a single branch instruction. + bool trampoline_emitted_; + static const int kTrampolineSlotsSize = kInstrSize; + static const int kMaxCondBranchReach = (1 << (16 - 1)) - 1; + static const int kMaxBlockTrampolineSectionSize = 64 * kInstrSize; + static const int kInvalidSlotPos = -1; + + Trampoline trampoline_; + bool internal_trampoline_exception_; + + friend class RegExpMacroAssemblerPPC; + friend class RelocInfo; + friend class CodePatcher; + friend class BlockTrampolinePoolScope; + + PositionsRecorder positions_recorder_; + + bool emit_debug_code_; + bool predictable_code_size_; + + friend class PositionsRecorder; + friend class EnsureSpace; +}; + + +class EnsureSpace BASE_EMBEDDED { + public: + explicit EnsureSpace(Assembler* assembler) { + assembler->CheckBuffer(); + } +}; + +} } // namespace v8::internal + +#endif // V8_PPC_ASSEMBLER_PPC_H_ diff --git a/src/ppc/builtins-ppc.cc b/src/ppc/builtins-ppc.cc new file mode 100644 index 0000000..cdda043 --- /dev/null +++ b/src/ppc/builtins-ppc.cc @@ -0,0 +1,1910 @@ +// Copyright 2012 the V8 project authors. All rights reserved. 
+// +// Copyright IBM Corp. 2012, 2013. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "v8.h" + +#if defined(V8_TARGET_ARCH_PPC) + +#include "codegen.h" +#include "debug.h" +#include "deoptimizer.h" +#include "full-codegen.h" +#include "runtime.h" + +namespace v8 { +namespace internal { + + +#define __ ACCESS_MASM(masm) + +void Builtins::Generate_Adaptor(MacroAssembler* masm, + CFunctionId id, + BuiltinExtraArguments extra_args) { + // ----------- S t a t e ------------- + // -- r3 : number of arguments excluding receiver + // -- r4 : called function (only guaranteed when + // extra_args requires it) + // -- cp : context + // -- sp[0] : last argument + // -- ... + // -- sp[4 * (argc - 1)] : first argument (argc == r0) + // -- sp[4 * argc] : receiver + // ----------------------------------- + + // Insert extra arguments. + int num_extra_args = 0; + if (extra_args == NEEDS_CALLED_FUNCTION) { + num_extra_args = 1; + __ push(r4); + } else { + ASSERT(extra_args == NO_EXTRA_ARGUMENTS); + } + + // JumpToExternalReference expects r0 to contain the number of arguments + // including the receiver and the extra arguments. + __ addi(r3, r3, Operand(num_extra_args + 1)); + __ JumpToExternalReference(ExternalReference(id, masm->isolate())); +} + + +// Load the built-in InternalArray function from the current context. +static void GenerateLoadInternalArrayFunction(MacroAssembler* masm, + Register result) { + // Load the native context. + + __ LoadP(result, + MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); + __ LoadP(result, + FieldMemOperand(result, GlobalObject::kNativeContextOffset)); + // Load the InternalArray function from the native context. + __ LoadP(result, + MemOperand(result, + Context::SlotOffset( + Context::INTERNAL_ARRAY_FUNCTION_INDEX))); +} + + +// Load the built-in Array function from the current context. +static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) { + // Load the native context. 
+ + __ LoadP(result, + MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); + __ LoadP(result, + FieldMemOperand(result, GlobalObject::kNativeContextOffset)); + // Load the Array function from the native context. + __ LoadP(result, + MemOperand(result, + Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX))); +} + + +// Allocate an empty JSArray. The allocated array is put into the result +// register. An elements backing store is allocated with size initial_capacity +// and filled with the hole values. +static void AllocateEmptyJSArray(MacroAssembler* masm, + Register array_function, + Register result, + Register scratch1, + Register scratch2, + Register scratch3, + Label* gc_required) { + const int initial_capacity = JSArray::kPreallocatedArrayElements; + STATIC_ASSERT(initial_capacity >= 0); + __ LoadInitialArrayMap(array_function, scratch2, scratch1, false); + + // Allocate the JSArray object together with space for a fixed array with the + // requested elements. + int size = JSArray::kSize; + if (initial_capacity > 0) { + size += FixedArray::SizeFor(initial_capacity); + } + __ AllocateInNewSpace(size, + result, + scratch2, + scratch3, + gc_required, + TAG_OBJECT); + + // Future optimization: defer tagging the result pointer for more + // efficient 64-bit memory accesses (due to alignment requirements + // on the memoperand offset). + + // Allocated the JSArray. Now initialize the fields except for the elements + // array. + // result: JSObject + // scratch1: initial map + // scratch2: start of next object + __ StoreP(scratch1, FieldMemOperand(result, JSObject::kMapOffset), r0); + __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex); + __ StoreP(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset), r0); + // Field JSArray::kElementsOffset is initialized later. + __ li(scratch3, Operand(0, RelocInfo::NONE)); + __ StoreP(scratch3, FieldMemOperand(result, JSArray::kLengthOffset), r0); + + if (initial_capacity == 0) { + __ StoreP(scratch1, FieldMemOperand(result, JSArray::kElementsOffset), r0); + return; + } + + // Calculate the location of the elements array and set elements array member + // of the JSArray. + // result: JSObject + // scratch2: start of next object + __ addi(scratch1, result, Operand(JSArray::kSize)); + __ StoreP(scratch1, FieldMemOperand(result, JSArray::kElementsOffset), r0); + + // Clear the heap tag on the elements array. + __ subi(scratch1, scratch1, Operand(kHeapObjectTag)); + + // Initialize the FixedArray and fill it with holes. FixedArray length is + // stored as a smi. + // result: JSObject + // scratch1: elements array (untagged) + // scratch2: start of next object + __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex); + STATIC_ASSERT(0 * kPointerSize == FixedArray::kMapOffset); + __ StoreP(scratch3, MemOperand(scratch1)); + __ addi(scratch1, scratch1, Operand(kPointerSize)); + __ LoadSmiLiteral(scratch3, Smi::FromInt(initial_capacity)); + STATIC_ASSERT(1 * kPointerSize == FixedArray::kLengthOffset); + __ StoreP(scratch3, MemOperand(scratch1)); + __ addi(scratch1, scratch1, Operand(kPointerSize)); + + // Fill the FixedArray with the hole value. Inline the code if short. 
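AllocateEmptyJSArray above reserves JSArray::kSize plus FixedArray::SizeFor(initial_capacity) in a single allocation, where the FixedArray layout asserted in this function is a map word, a length word, and then the elements. A short sketch of that size computation (the initial capacity of 4 is an assumed example value, not taken from this patch):

    #include <cassert>
    #include <cstddef>

    constexpr std::size_t kPointerSize = sizeof(void*);
    // Map word + length word, per the STATIC_ASSERTs in this function.
    constexpr std::size_t kFixedArrayHeaderSize = 2 * kPointerSize;

    constexpr std::size_t FixedArraySizeFor(std::size_t n) {
      return kFixedArrayHeaderSize + n * kPointerSize;
    }

    int main() {
      // With an assumed initial capacity of 4, the elements backing store
      // adds 6 pointer-sized words to the JSArray allocation.
      assert(FixedArraySizeFor(4) == 6 * kPointerSize);
      return 0;
    }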
+ STATIC_ASSERT(2 * kPointerSize == FixedArray::kHeaderSize); + __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex); + static const int kLoopUnfoldLimit = 4; + if (initial_capacity <= kLoopUnfoldLimit) { + for (int i = 0; i < initial_capacity; i++) { + __ StoreP(scratch3, MemOperand(scratch1)); + __ addi(scratch1, scratch1, Operand(kPointerSize)); + } + } else { + Label loop, entry; + __ addi(scratch2, scratch1, Operand(initial_capacity * kPointerSize)); + __ b(&entry); + __ bind(&loop); + __ StoreP(scratch3, MemOperand(scratch1)); + __ addi(scratch1, scratch1, Operand(kPointerSize)); + __ bind(&entry); + __ cmp(scratch1, scratch2); + __ blt(&loop); + } +} + +// Allocate a JSArray with the number of elements stored in a register. The +// register array_function holds the built-in Array function and the register +// array_size holds the size of the array as a smi. The allocated array is put +// into the result register and beginning and end of the FixedArray elements +// storage is put into registers elements_array_storage and elements_array_end +// (see below for when that is not the case). If the parameter fill_with_holes +// is true the allocated elements backing store is filled with the hole values +// otherwise it is left uninitialized. When the backing store is filled the +// register elements_array_storage is scratched. +static void AllocateJSArray(MacroAssembler* masm, + Register array_function, // Array function. + Register array_size, // As a smi, cannot be 0. + Register result, + Register elements_array_storage, + Register elements_array_end, + Register scratch1, + Register scratch2, + bool fill_with_hole, + Label* gc_required) { + // Load the initial map from the array function. + __ LoadInitialArrayMap(array_function, scratch2, + elements_array_storage, fill_with_hole); + + if (FLAG_debug_code) { // Assert that array size is not zero. + __ cmpi(array_size, Operand::Zero()); + __ Assert(ne, "array size is unexpectedly 0"); + } + + // Allocate the JSArray object together with space for a FixedArray with the + // requested number of elements. We omit the TAG_OBJECT flag and defer + // tagging the pointer until the end so that we can more efficiently perform + // aligned memory accesses. + __ li(elements_array_end, + Operand((JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize)); + __ SmiUntag(scratch1, array_size); + __ add(elements_array_end, elements_array_end, scratch1); + __ AllocateInNewSpace( + elements_array_end, + result, + scratch1, + scratch2, + gc_required, + static_cast(SIZE_IN_WORDS)); + + // Allocated the JSArray. Now initialize the fields except for the elements + // array. + // result: JSObject (untagged) + // elements_array_storage: initial map + // array_size: size of array (smi) + __ StoreP(elements_array_storage, MemOperand(result, JSObject::kMapOffset)); + __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex); + __ StoreP(elements_array_storage, + MemOperand(result, JSArray::kPropertiesOffset)); + // Field JSArray::kElementsOffset is initialized later. + __ StoreP(array_size, MemOperand(result, JSArray::kLengthOffset)); + + // Calculate the location of the elements array and set elements array member + // of the JSArray. + // result: JSObject (untagged) + // array_size: size of array (smi) + __ addi(elements_array_storage, result, + Operand(JSArray::kSize + kHeapObjectTag)); + __ StoreP(elements_array_storage, + MemOperand(result, JSArray::kElementsOffset)); + + // Clear the heap tag on the elements array. 
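The repeated addi/subi of kHeapObjectTag and the FieldMemOperand() accesses in these builtins all encode one convention: a tagged heap pointer is the raw object address plus a small tag, so field loads fold the tag into the displacement and freshly allocated (untagged) objects get the tag added back at the end. A minimal sketch of that arithmetic, assuming V8's usual kHeapObjectTag value of 1:

    #include <cassert>
    #include <cstdint>

    constexpr intptr_t kHeapObjectTag = 1;

    constexpr intptr_t Tag(intptr_t raw)      { return raw + kHeapObjectTag; }
    constexpr intptr_t Untag(intptr_t tagged) { return tagged - kHeapObjectTag; }

    // FieldMemOperand(obj, offset) addresses the field while obj still
    // carries the tag, so the tag is subtracted in the displacement.
    constexpr intptr_t FieldAddress(intptr_t tagged, int offset) {
      return tagged + offset - kHeapObjectTag;
    }

    int main() {
      intptr_t raw = 0x10000;          // freshly allocated, untagged
      intptr_t obj = Tag(raw);         // "addi result, result, kHeapObjectTag"
      assert(Untag(obj) == raw);
      assert(FieldAddress(obj, 8) == raw + 8);
      return 0;
    }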
+ STATIC_ASSERT(kSmiTag == 0); + __ subi(elements_array_storage, + elements_array_storage, + Operand(kHeapObjectTag)); + // Initialize the fixed array and fill it with holes. FixedArray length is + // stored as a smi. + // result: JSObject (untagged) + // elements_array_storage: elements array (untagged) + // array_size: size of array (smi) + __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex); + ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset); + __ StoreP(scratch1, MemOperand(elements_array_storage)); + __ addi(elements_array_storage, elements_array_storage, + Operand(kPointerSize)); + STATIC_ASSERT(kSmiTag == 0); + ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset); + __ StoreP(array_size, MemOperand(elements_array_storage)); + __ addi(elements_array_storage, elements_array_storage, + Operand(kPointerSize)); + + // Calculate elements array and elements array end. + // result: JSObject (untagged) + // elements_array_storage: elements array element storage + // array_size: smi-tagged size of elements array + __ SmiToPtrArrayOffset(scratch1, array_size); + __ add(elements_array_end, elements_array_storage, scratch1); + + // Fill the allocated FixedArray with the hole value if requested. + // result: JSObject (untagged) + // elements_array_storage: elements array element storage + // elements_array_end: start of next object + if (fill_with_hole) { + Label loop, entry; + __ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex); + __ b(&entry); + __ bind(&loop); + __ StoreP(scratch1, MemOperand(elements_array_storage)); + __ addi(elements_array_storage, elements_array_storage, + Operand(kPointerSize)); + __ bind(&entry); + __ cmp(elements_array_storage, elements_array_end); + __ blt(&loop); + } + + // Tag object + __ addi(result, result, Operand(kHeapObjectTag)); +} + +// Create a new array for the built-in Array function. This function allocates +// the JSArray object and the FixedArray elements array and initializes these. +// If the Array cannot be constructed in native code the runtime is called. This +// function assumes the following state: +// r3: argc +// r4: constructor (built-in Array function) +// lr: return address +// sp[0]: last argument +// This function is used for both construct and normal calls of Array. The only +// difference between handling a construct call and a normal call is that for a +// construct call the constructor function in r1 needs to be preserved for +// entering the generic code. In both cases argc in r0 needs to be preserved. +// Both registers are preserved by this code so no need to differentiate between +// construct call and normal call. +static void ArrayNativeCode(MacroAssembler* masm, + Label* call_generic_code) { + Counters* counters = masm->isolate()->counters(); + Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array, + has_non_smi_element, finish, cant_transition_map, not_double; + + // Check for array construction with zero arguments or one. + __ cmpi(r3, Operand(0, RelocInfo::NONE)); + __ bne(&argc_one_or_more); + + // Handle construction of an empty array. + __ bind(&empty_array); + AllocateEmptyJSArray(masm, + r4, + r5, + r6, + r7, + r8, + call_generic_code); + __ IncrementCounter(counters->array_function_native(), 1, r6, r7); + // Set up return value, remove receiver from stack and return. + __ mr(r3, r5); + __ addi(sp, sp, Operand(kPointerSize)); + __ blr(); + + // Check for one argument. Bail out if argument is not smi or if it is + // negative. 
+ __ bind(&argc_one_or_more); + __ cmpi(r3, Operand(1)); + __ bne(&argc_two_or_more); + STATIC_ASSERT(kSmiTag == 0); + __ LoadP(r5, MemOperand(sp)); // Get the argument from the stack. + __ cmpi(r5, Operand::Zero()); + __ bne(¬_empty_array); + __ Drop(1); // Adjust stack. + __ li(r3, Operand::Zero()); // Treat this as a call with argc of zero. + __ b(&empty_array); + + __ bind(¬_empty_array); + __ TestIfPositiveSmi(r5, r6); + __ bne(call_generic_code, cr0); + + // Handle construction of an empty array of a certain size. Bail out if size + // is too large to actually allocate an elements array. + STATIC_ASSERT(kSmiTag == 0); + __ CmpSmiLiteral(r5, Smi::FromInt(JSObject::kInitialMaxFastElementArray), r0); + __ bge(call_generic_code); + + // r3: argc + // r4: constructor + // r5: array_size (smi) + // sp[0]: argument + AllocateJSArray(masm, + r4, + r5, + r6, + r7, + r8, + r9, + r10, + true, + call_generic_code); + __ IncrementCounter(counters->array_function_native(), 1, r5, r7); + // Set up return value, remove receiver and argument from stack and return. + __ mr(r3, r6); + __ addi(sp, sp, Operand(2 * kPointerSize)); + __ blr(); + + // Handle construction of an array from a list of arguments. + __ bind(&argc_two_or_more); + // Convet argc to a smi. + __ SmiTag(r5, r3); + + // r3: argc + // r4: constructor + // r5: array_size (smi) + // sp[0]: last argument + AllocateJSArray(masm, + r4, + r5, + r6, + r7, + r8, + r9, + r10, + false, + call_generic_code); + __ IncrementCounter(counters->array_function_native(), 1, r5, r9); + + // Fill arguments as array elements. Copy from the top of the stack (last + // element) to the array backing store filling it backwards. Note: + // elements_array_end points after the backing store therefore PreIndex is + // used when filling the backing store. + // r3: argc + // r6: JSArray + // r7: elements_array storage start (untagged) + // r8: elements_array_end (untagged) + // sp[0]: last argument + Label loop, entry; + __ mr(r10, sp); + __ b(&entry); + __ bind(&loop); + __ LoadP(r5, MemOperand(r10)); + __ addi(r10, r10, Operand(kPointerSize)); + if (FLAG_smi_only_arrays) { + __ JumpIfNotSmi(r5, &has_non_smi_element); + } + __ StorePU(r5, MemOperand(r8, -kPointerSize)); + __ bind(&entry); + __ cmp(r7, r8); + __ blt(&loop); + + __ bind(&finish); + __ mr(sp, r10); + + // Remove caller arguments and receiver from the stack, setup return value and + // return. + // r3: argc + // r6: JSArray + // sp[0]: receiver + __ addi(sp, sp, Operand(kPointerSize)); + __ mr(r3, r6); + __ blr(); + + __ bind(&has_non_smi_element); + // Double values are handled by the runtime. + __ CheckMap( + r5, r22, Heap::kHeapNumberMapRootIndex, ¬_double, DONT_DO_SMI_CHECK); + __ bind(&cant_transition_map); + __ UndoAllocationInNewSpace(r6, r7); + __ b(call_generic_code); + + __ bind(¬_double); + // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS. 
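The SmiTag/SmiUntag and SmiToPtrArrayOffset operations used in ArrayNativeCode rely on V8's small-integer encoding: kSmiTag is 0 and, on 32-bit targets, kSmiTagSize is 1, so a smi is the integer shifted left by one with a zero low bit. A standalone sketch of the 32-bit case (the PPC64 configuration uses a wider smi shift and is deliberately not modelled here):

    #include <cassert>
    #include <cstdint>

    constexpr int kSmiTagSize = 1;
    constexpr int kPointerSizeLog2 = 2;        // 4-byte pointers on PPC32

    constexpr int32_t SmiTag(int32_t value) { return value << kSmiTagSize; }
    constexpr int32_t SmiUntag(int32_t smi) { return smi >> kSmiTagSize; }

    // Convert a smi-encoded element count into a byte offset
    // (value * kPointerSize) without fully untagging first.
    constexpr int32_t SmiToPtrArrayOffset(int32_t smi) {
      return smi << (kPointerSizeLog2 - kSmiTagSize);
    }

    int main() {
      assert(SmiTag(5) == 10);                 // low (tag) bit stays 0
      assert(SmiUntag(SmiTag(5)) == 5);
      assert(SmiToPtrArrayOffset(SmiTag(5)) == 5 * 4);
      return 0;
    }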
+ // r6: JSArray + __ LoadP(r5, FieldMemOperand(r6, HeapObject::kMapOffset)); + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, + FAST_ELEMENTS, + r5, + r22, + &cant_transition_map); + __ StoreP(r5, FieldMemOperand(r6, HeapObject::kMapOffset), r0); + __ RecordWriteField(r6, + HeapObject::kMapOffset, + r5, + r22, + kLRHasNotBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + Label loop2; + __ subi(r10, r10, Operand(kPointerSize)); + __ bind(&loop2); + __ LoadP(r5, MemOperand(r10)); + __ addi(r10, r10, Operand(kPointerSize)); + __ StorePU(r5, MemOperand(r8, -kPointerSize)); + __ cmp(r7, r8); + __ blt(&loop2); + __ b(&finish); +} + + +void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- r3 : number of arguments + // -- lr : return address + // -- sp[...]: constructor arguments + // ----------------------------------- + Label generic_array_code, one_or_more_arguments, two_or_more_arguments; + + // Get the InternalArray function. + GenerateLoadInternalArrayFunction(masm, r4); + + if (FLAG_debug_code) { + // Initial map for the builtin InternalArray functions should be maps. + __ LoadP(r5, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset)); + STATIC_ASSERT(kSmiTagMask < 0x8000); + __ andi(r0, r5, Operand(kSmiTagMask)); + __ Assert(ne, "Unexpected initial map for InternalArray function", cr0); + __ CompareObjectType(r5, r6, r7, MAP_TYPE); + __ Assert(eq, "Unexpected initial map for InternalArray function"); + } + + // Run the native code for the InternalArray function called as a normal + // function. + ArrayNativeCode(masm, &generic_array_code); + + // Jump to the generic array code if the specialized code cannot handle the + // construction. + __ bind(&generic_array_code); + + Handle array_code = + masm->isolate()->builtins()->InternalArrayCodeGeneric(); + __ Jump(array_code, RelocInfo::CODE_TARGET); +} + + +void Builtins::Generate_ArrayCode(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- r3 : number of arguments + // -- lr : return address + // -- sp[...]: constructor arguments + // ----------------------------------- + Label generic_array_code, one_or_more_arguments, two_or_more_arguments; + + // Get the Array function. + GenerateLoadArrayFunction(masm, r4); + + if (FLAG_debug_code) { + // Initial map for the builtin Array functions should be maps. + __ LoadP(r5, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset)); + STATIC_ASSERT(kSmiTagMask < 0x8000); + __ andi(r0, r5, Operand(kSmiTagMask)); + __ Assert(ne, "Unexpected initial map for Array function", cr0); + __ CompareObjectType(r5, r6, r7, MAP_TYPE); + __ Assert(eq, "Unexpected initial map for Array function"); + } + + // Run the native code for the Array function called as a normal function. + ArrayNativeCode(masm, &generic_array_code); + + // Jump to the generic array code if the specialized code cannot handle + // the construction. 
+ __ bind(&generic_array_code); + + Handle array_code = + masm->isolate()->builtins()->ArrayCodeGeneric(); + __ Jump(array_code, RelocInfo::CODE_TARGET); +} + + +void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- r3 : number of arguments + // -- r4 : constructor function + // -- lr : return address + // -- sp[...]: constructor arguments + // ----------------------------------- + Label generic_constructor; + + if (FLAG_debug_code) { + // The array construct code is only set for the builtin and internal + // Array functions which always have a map. + // Initial map for the builtin Array function should be a map. + __ LoadP(r5, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset)); + __ andi(r0, r5, Operand(kSmiTagMask)); + __ Assert(ne, "Unexpected initial map for Array function", cr0); + __ CompareObjectType(r5, r6, r7, MAP_TYPE); + __ Assert(eq, "Unexpected initial map for Array function"); + } + + // Run the native code for the Array function called as a constructor. + ArrayNativeCode(masm, &generic_constructor); + + // Jump to the generic construct code in case the specialized code cannot + // handle the construction. + __ bind(&generic_constructor); + Handle generic_construct_stub = + masm->isolate()->builtins()->JSConstructStubGeneric(); + __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); +} + + +void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- r3 : number of arguments + // -- r4 : constructor function + // -- lr : return address + // -- sp[(argc - n - 1) * 4] : arg[n] (zero based) + // -- sp[argc * 4] : receiver + // ----------------------------------- + Counters* counters = masm->isolate()->counters(); + __ IncrementCounter(counters->string_ctor_calls(), 1, r5, r6); + + Register function = r4; + if (FLAG_debug_code) { + __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, r5); + __ cmp(function, r5); + __ Assert(eq, "Unexpected String function"); + } + + // Load the first arguments in r3 and get rid of the rest. + Label no_arguments; + __ cmpi(r3, Operand(0, RelocInfo::NONE)); + __ beq(&no_arguments); + // First args = sp[(argc - 1) * 4]. + __ subi(r3, r3, Operand(1)); + __ ShiftLeftImm(r3, r3, Operand(kPointerSizeLog2)); + __ add(sp, sp, r3); + __ LoadP(r3, MemOperand(sp)); + // sp now point to args[0], drop args[0] + receiver. + __ Drop(2); + + Register argument = r5; + Label not_cached, argument_is_string; + NumberToStringStub::GenerateLookupNumberStringCache( + masm, + r3, // Input. + argument, // Result. + r6, // Scratch. + r7, // Scratch. + r8, // Scratch. + false, // Is it a Smi? + ¬_cached); + __ IncrementCounter(counters->string_ctor_cached_number(), 1, r6, r7); + __ bind(&argument_is_string); + + // ----------- S t a t e ------------- + // -- r5 : argument converted to string + // -- r4 : constructor function + // -- lr : return address + // ----------------------------------- + + Label gc_required; + __ AllocateInNewSpace(JSValue::kSize, + r3, // Result. + r6, // Scratch. + r7, // Scratch. + &gc_required, + TAG_OBJECT); + + // Initialising the String Object. 
+ Register map = r6; + __ LoadGlobalFunctionInitialMap(function, map, r7); + if (FLAG_debug_code) { + __ lbz(r7, FieldMemOperand(map, Map::kInstanceSizeOffset)); + __ cmpi(r7, Operand(JSValue::kSize >> kPointerSizeLog2)); + __ Assert(eq, "Unexpected string wrapper instance size"); + __ lbz(r7, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset)); + __ cmpi(r7, Operand(0, RelocInfo::NONE)); + __ Assert(eq, "Unexpected unused properties of string wrapper"); + } + __ StoreP(map, FieldMemOperand(r3, HeapObject::kMapOffset), r0); + + __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex); + __ StoreP(r6, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0); + __ StoreP(r6, FieldMemOperand(r3, JSObject::kElementsOffset), r0); + + __ StoreP(argument, FieldMemOperand(r3, JSValue::kValueOffset), r0); + + // Ensure the object is fully initialized. + STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize); + + __ Ret(); + + // The argument was not found in the number to string cache. Check + // if it's a string already before calling the conversion builtin. + Label convert_argument; + __ bind(¬_cached); + __ JumpIfSmi(r3, &convert_argument); + + // Is it a String? + __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset)); + __ lbz(r6, FieldMemOperand(r5, Map::kInstanceTypeOffset)); + STATIC_ASSERT(kNotStringTag != 0); + __ andi(r0, r6, Operand(kIsNotStringMask)); + __ bne(&convert_argument, cr0); + __ mr(argument, r3); + __ IncrementCounter(counters->string_ctor_conversions(), 1, r6, r7); + __ b(&argument_is_string); + + // Invoke the conversion builtin and put the result into r5. + __ bind(&convert_argument); + __ push(function); // Preserve the function. + __ IncrementCounter(counters->string_ctor_conversions(), 1, r6, r7); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(r3); + __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION); + } + __ pop(function); + __ mr(argument, r3); + __ b(&argument_is_string); + + // Load the empty string into r5, remove the receiver from the + // stack, and jump back to the case where the argument is a string. + __ bind(&no_arguments); + __ LoadRoot(argument, Heap::kEmptyStringRootIndex); + __ Drop(1); + __ b(&argument_is_string); + + // At this point the argument is already a string. Call runtime to + // create a string wrapper. + __ bind(&gc_required); + __ IncrementCounter(counters->string_ctor_gc_required(), 1, r6, r7); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(argument); + __ CallRuntime(Runtime::kNewStringWrapper, 1); + } + __ Ret(); +} + + +static void GenerateTailCallToSharedCode(MacroAssembler* masm) { + __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); + __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kCodeOffset)); + __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ mtctr(r5); + __ bcr(); +} + + +void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) { + GenerateTailCallToSharedCode(masm); +} + + +void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) { + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Push a copy of the function onto the stack. + __ push(r4); + // Push call kind information. + __ push(r8); + + __ push(r4); // Function is also the parameter to the runtime call. + __ CallRuntime(Runtime::kParallelRecompile, 1); + + // Restore call kind information. + __ pop(r8); + // Restore receiver. + __ pop(r4); + + // Tear down internal frame. 
+ } + + GenerateTailCallToSharedCode(masm); +} + + +static void Generate_JSConstructStubHelper(MacroAssembler* masm, + bool is_api_function, + bool count_constructions) { + // ----------- S t a t e ------------- + // -- r3 : number of arguments + // -- r4 : constructor function + // -- lr : return address + // -- sp[...]: constructor arguments + // ----------------------------------- + + // Should never count constructions for api objects. + ASSERT(!is_api_function || !count_constructions); + + Isolate* isolate = masm->isolate(); + + // Enter a construct frame. + { + FrameScope scope(masm, StackFrame::CONSTRUCT); + + // Preserve the two incoming parameters on the stack. + __ SmiTag(r3); + __ push(r3); // Smi-tagged arguments count. + __ push(r4); // Constructor function. + + // Try to allocate the object without transitioning into C code. If any of + // the preconditions is not met, the code bails out to the runtime call. + Label rt_call, allocated; + if (FLAG_inline_new) { + Label undo_allocation; +#ifdef ENABLE_DEBUGGER_SUPPORT + ExternalReference debug_step_in_fp = + ExternalReference::debug_step_in_fp_address(isolate); + __ mov(r5, Operand(debug_step_in_fp)); + __ LoadP(r5, MemOperand(r5)); + __ cmpi(r5, Operand::Zero()); + __ bne(&rt_call); +#endif + + // Load the initial map and verify that it is in fact a map. + // r4: constructor function + __ LoadP(r5, FieldMemOperand(r4, + JSFunction::kPrototypeOrInitialMapOffset)); + __ JumpIfSmi(r5, &rt_call); + __ CompareObjectType(r5, r6, r7, MAP_TYPE); + __ bne(&rt_call); + + // Check that the constructor is not constructing a JSFunction (see + // comments in Runtime_NewObject in runtime.cc). In which case the + // initial map's instance type would be JS_FUNCTION_TYPE. + // r4: constructor function + // r5: initial map + __ CompareInstanceType(r5, r6, JS_FUNCTION_TYPE); + __ beq(&rt_call); + + if (count_constructions) { + Label allocate; + // Decrease generous allocation count. + __ LoadP(r6, FieldMemOperand(r4, + JSFunction::kSharedFunctionInfoOffset)); + MemOperand constructor_count = + FieldMemOperand(r6, SharedFunctionInfo::kConstructionCountOffset); + __ lbz(r7, constructor_count); + __ addi(r7, r7, Operand(-1)); + __ stb(r7, constructor_count); + __ cmpi(r7, Operand::Zero()); + __ bne(&allocate); + + __ push(r4); + __ push(r5); + + __ push(r4); // constructor + // The call will replace the stub, so the countdown is only done once. + __ CallRuntime(Runtime::kFinalizeInstanceSize, 1); + + __ pop(r5); + __ pop(r4); + + __ bind(&allocate); + } + + // Now allocate the JSObject on the heap. + // r4: constructor function + // r5: initial map + __ lbz(r6, FieldMemOperand(r5, Map::kInstanceSizeOffset)); + __ AllocateInNewSpace(r6, r7, r8, r9, &rt_call, SIZE_IN_WORDS); + + // Allocated the JSObject, now initialize the fields. Map is set to + // initial map and properties and elements are set to empty fixed array. + // r4: constructor function + // r5: initial map + // r6: object size + // r7: JSObject (not tagged) + __ LoadRoot(r9, Heap::kEmptyFixedArrayRootIndex); + __ mr(r8, r7); + ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); + __ StoreP(r5, MemOperand(r8)); + ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset); + __ StorePU(r9, MemOperand(r8, kPointerSize)); + ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset); + __ StorePU(r9, MemOperand(r8, kPointerSize)); + __ addi(r8, r8, Operand(kPointerSize)); + + // Fill all the in-object properties with the appropriate filler. 
+ // r4: constructor function + // r5: initial map + // r6: object size (in words) + // r7: JSObject (not tagged) + // r8: First in-object property of JSObject (not tagged) + uint32_t byte; + __ ShiftLeftImm(r9, r6, Operand(kPointerSizeLog2)); + __ add(r9, r7, r9); // End of object. + ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize); + __ LoadRoot(r10, Heap::kUndefinedValueRootIndex); + if (count_constructions) { + __ lwz(r3, FieldMemOperand(r5, Map::kInstanceSizesOffset)); + // Fetch Map::kPreAllocatedPropertyFieldsByte field from r3 + // and multiply by kPointerSizeLog2 + STATIC_ASSERT(Map::kPreAllocatedPropertyFieldsByte < 4); + byte = Map::kPreAllocatedPropertyFieldsByte; +#if __BYTE_ORDER == __BIG_ENDIAN + byte = 3 - byte; +#endif + __ ExtractBitRange(r3, r3, + ((byte + 1) * kBitsPerByte) - 1, + byte * kBitsPerByte); + __ ShiftLeftImm(r3, r3, Operand(kPointerSizeLog2)); + __ add(r3, r8, r3); + // r3: offset of first field after pre-allocated fields + if (FLAG_debug_code) { + __ cmp(r3, r9); + __ Assert(le, "Unexpected number of pre-allocated property fields."); + } + __ InitializeFieldsWithFiller(r8, r3, r10); + // To allow for truncation. + __ LoadRoot(r10, Heap::kOnePointerFillerMapRootIndex); + } + __ InitializeFieldsWithFiller(r8, r9, r10); + + // Add the object tag to make the JSObject real, so that we can continue + // and jump into the continuation code at any time from now on. Any + // failures need to undo the allocation, so that the heap is in a + // consistent state and verifiable. + __ addi(r7, r7, Operand(kHeapObjectTag)); + + // Check if a non-empty properties array is needed. Continue with + // allocated object if not fall through to runtime call if it is. + // r4: constructor function + // r7: JSObject + // r8: start of next object (not tagged) + __ lbz(r6, FieldMemOperand(r5, Map::kUnusedPropertyFieldsOffset)); + // The field instance sizes contains both pre-allocated property fields + // and in-object properties. + __ lwz(r3, FieldMemOperand(r5, Map::kInstanceSizesOffset)); + // Fetch Map::kPreAllocatedPropertyFieldsByte field from r3 + STATIC_ASSERT(Map::kPreAllocatedPropertyFieldsByte < 4); + byte = Map::kPreAllocatedPropertyFieldsByte; +#if __BYTE_ORDER == __BIG_ENDIAN + byte = 3 - byte; +#endif + __ ExtractBitRange(r9, r3, + ((byte + 1) * kBitsPerByte) - 1, + byte * kBitsPerByte); + __ add(r6, r6, r9); + STATIC_ASSERT(Map::kInObjectPropertiesByte < 4); + byte = Map::kInObjectPropertiesByte; +#if __BYTE_ORDER == __BIG_ENDIAN + byte = 3 - byte; +#endif + __ ExtractBitRange(r9, r3, + ((byte + 1) * kBitsPerByte) - 1, + byte * kBitsPerByte); + __ sub(r6, r6, r9); // roohack - sub order may be incorrect + __ cmpi(r6, Operand::Zero()); + + // Done if no extra properties are to be allocated. + __ beq(&allocated); + __ Assert(ge, "Property allocation count failed."); + + // Scale the number of elements by pointer size and add the header for + // FixedArrays to the start of the next object calculation from above. + // r4: constructor + // r6: number of elements in properties array + // r7: JSObject + // r8: start of next object + __ addi(r3, r6, Operand(FixedArray::kHeaderSize / kPointerSize)); + __ AllocateInNewSpace( + r3, + r8, + r9, + r5, + &undo_allocation, + static_cast(RESULT_CONTAINS_TOP | SIZE_IN_WORDS)); + + // Initialize the FixedArray. 
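The construct stub above loads the packed Map::kInstanceSizesOffset word with lwz and pulls out individual byte-sized fields with ExtractBitRange, flipping the byte index (byte = 3 - byte) on big-endian targets. A self-contained sketch of why the flip is needed (the field values and helper below are illustrative, not V8's; the only assumption is that the byte constants name memory offsets within the 32-bit word):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Extract bits [lo, hi] (inclusive) of a 32-bit value.
    static uint32_t ExtractBits(uint32_t value, int hi, int lo) {
      return (value >> lo) & ((1u << (hi - lo + 1)) - 1u);
    }

    int main() {
      // Four byte-sized fields in memory order 0..3.
      const uint8_t fields[4] = {0x11, 0x22, 0x33, 0x44};
      uint32_t word;
      std::memcpy(&word, fields, sizeof(word));  // emulates the lwz load

      int byte = 2;                  // wanted field, by memory index
      int reg_byte = byte;
    #if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
      reg_byte = 3 - byte;           // same flip as in the stub above
    #endif
      uint32_t field = ExtractBits(word, (reg_byte + 1) * 8 - 1, reg_byte * 8);
      assert(field == fields[byte]);
      return 0;
    }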
+ // r4: constructor + // r6: number of elements in properties array + // r7: JSObject + // r8: FixedArray (not tagged) + __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex); + __ mr(r5, r8); + ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); + __ StoreP(r9, MemOperand(r5)); + ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset); + __ SmiTag(r3, r6); + __ StorePU(r3, MemOperand(r5, kPointerSize)); + __ addi(r5, r5, Operand(kPointerSize)); + + // Initialize the fields to undefined. + // r4: constructor function + // r5: First element of FixedArray (not tagged) + // r6: number of elements in properties array + // r7: JSObject + // r8: FixedArray (not tagged) + __ ShiftLeftImm(r9, r6, Operand(kPointerSizeLog2)); + __ add(r9, r5, r9); // End of object. + ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize); + { Label loop, entry; + if (count_constructions) { + __ LoadRoot(r10, Heap::kUndefinedValueRootIndex); + } else if (FLAG_debug_code) { + __ LoadRoot(r11, Heap::kUndefinedValueRootIndex); + __ cmp(r10, r11); + __ Assert(eq, "Undefined value not loaded."); + } + __ b(&entry); + __ bind(&loop); + __ StoreP(r10, MemOperand(r5)); + __ addi(r5, r5, Operand(kPointerSize)); + __ bind(&entry); + __ cmp(r5, r9); + __ blt(&loop); + } + + // Store the initialized FixedArray into the properties field of + // the JSObject + // r4: constructor function + // r7: JSObject + // r8: FixedArray (not tagged) + __ addi(r8, r8, Operand(kHeapObjectTag)); // Add the heap tag. + __ StoreP(r8, FieldMemOperand(r7, JSObject::kPropertiesOffset), r0); + + // Continue with JSObject being successfully allocated + // r4: constructor function + // r7: JSObject + __ b(&allocated); + + // Undo the setting of the new top so that the heap is verifiable. For + // example, the map's unused properties potentially do not match the + // allocated objects unused properties. + // r7: JSObject (previous new top) + __ bind(&undo_allocation); + __ UndoAllocationInNewSpace(r7, r8); + } + + // Allocate the new receiver object using the runtime call. + // r4: constructor function + __ bind(&rt_call); + __ push(r4); // argument for Runtime_NewObject + __ CallRuntime(Runtime::kNewObject, 1); + __ mr(r7, r3); + + // Receiver for constructor call allocated. + // r7: JSObject + __ bind(&allocated); + __ push(r7); + __ push(r7); + + // Reload the number of arguments and the constructor from the stack. + // sp[0]: receiver + // sp[1]: receiver + // sp[2]: constructor function + // sp[3]: number of arguments (smi-tagged) + __ LoadP(r4, MemOperand(sp, 2 * kPointerSize)); + __ LoadP(r6, MemOperand(sp, 3 * kPointerSize)); + + // Set up pointer to last argument. + __ addi(r5, fp, Operand(StandardFrameConstants::kCallerSPOffset)); + + // Set up number of arguments for function call below + __ SmiUntag(r3, r6); + + // Copy arguments and receiver to the expression stack. + // r3: number of arguments + // r4: constructor function + // r5: address of last argument (caller sp) + // r6: number of arguments (smi-tagged) + // sp[0]: receiver + // sp[1]: receiver + // sp[2]: constructor function + // sp[3]: number of arguments (smi-tagged) + Label loop, no_args; + __ cmpi(r3, Operand::Zero()); + __ beq(&no_args); + __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2)); + __ mtctr(r3); + __ bind(&loop); + __ subi(ip, ip, Operand(kPointerSize)); + __ LoadPX(r0, MemOperand(r5, ip)); + __ push(r0); + __ bdnz(&loop); + __ bind(&no_args); + + // Call the function. 
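The argument-copy loop just above is the standard PowerPC counted-loop idiom: mtctr loads the count register with argc and bdnz decrements it and branches while it is non-zero, so the body runs exactly argc times, pushing arguments from last to first. A plain-C++ model of that loop (container-based, names illustrative):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    void CopyArguments(const std::vector<intptr_t>& caller_args,   // r5 base
                       std::vector<intptr_t>* expression_stack) {  // push()es
      std::size_t ctr = caller_args.size();      // mtctr r3
      std::size_t ip = caller_args.size();       // scaled by kPointerSize in the stub
      while (ctr-- > 0) {                        // bdnz &loop
        --ip;                                    // subi ip, ip, kPointerSize
        expression_stack->push_back(caller_args[ip]);  // LoadPX + push
      }
    }

    int main() {
      std::vector<intptr_t> args = {10, 20, 30};
      std::vector<intptr_t> stack;
      CopyArguments(args, &stack);
      // Arguments land on the expression stack in reverse order.
      assert((stack == std::vector<intptr_t>{30, 20, 10}));
      return 0;
    }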
+ // r3: number of arguments + // r4: constructor function + if (is_api_function) { + __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset)); + Handle code = + masm->isolate()->builtins()->HandleApiCallConstruct(); + ParameterCount expected(0); + __ InvokeCode(code, expected, expected, + RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD); + } else { + ParameterCount actual(r3); + __ InvokeFunction(r4, actual, CALL_FUNCTION, // roohack + NullCallWrapper(), CALL_AS_METHOD); + } + + // Store offset of return address for deoptimizer. + if (!is_api_function && !count_constructions) { + masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset()); + } + + // Restore context from the frame. + // r3: result + // sp[0]: receiver + // sp[1]: constructor function + // sp[2]: number of arguments (smi-tagged) + __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + + // If the result is an object (in the ECMA sense), we should get rid + // of the receiver and use the result; see ECMA-262 section 13.2.2-7 + // on page 74. + Label use_receiver, exit; + + // If the result is a smi, it is *not* an object in the ECMA sense. + // r3: result + // sp[0]: receiver (newly allocated object) + // sp[1]: constructor function + // sp[2]: number of arguments (smi-tagged) + __ JumpIfSmi(r3, &use_receiver); + + // If the type of the result (stored in its map) is less than + // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense. + __ CompareObjectType(r3, r6, r6, FIRST_SPEC_OBJECT_TYPE); + __ bge(&exit); + + // Throw away the result of the constructor invocation and use the + // on-stack receiver as the result. + __ bind(&use_receiver); + __ LoadP(r3, MemOperand(sp)); + + // Remove receiver from the stack, remove caller arguments, and + // return. + __ bind(&exit); + // r3: result + // sp[0]: receiver (newly allocated object) + // sp[1]: constructor function + // sp[2]: number of arguments (smi-tagged) + __ LoadP(r4, MemOperand(sp, 2 * kPointerSize)); + + // Leave construct frame. + } + + __ SmiToPtrArrayOffset(r4, r4); + __ add(sp, sp, r4); + __ addi(sp, sp, Operand(kPointerSize)); + __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r4, r5); + __ blr(); +} + + +void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) { + Generate_JSConstructStubHelper(masm, false, true); +} + + +void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { + Generate_JSConstructStubHelper(masm, false, false); +} + + +void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) { + Generate_JSConstructStubHelper(masm, true, false); +} + + +static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, + bool is_construct) { + // Called from Generate_JS_Entry + // r3: code entry + // r4: function + // r5: receiver + // r6: argc + // r7: argv + // r0,r8-r9, cp may be clobbered + + // Clear the context before we push it when entering the internal frame. + __ li(cp, Operand(0, RelocInfo::NONE)); + + // Enter an internal frame. + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Set up the context from the function argument. + __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset)); + + __ InitializeRootRegister(); + + // Push the function and the receiver onto the stack. + __ push(r4); + __ push(r5); + + // Copy arguments to the stack in a loop. + // r4: function + // r6: argc + // r7: argv, i.e. 
points to first arg + Label loop, entry; + __ ShiftLeftImm(r0, r6, Operand(kPointerSizeLog2)); + __ add(r5, r7, r0); + // r5 points past last arg. + __ b(&entry); + __ bind(&loop); + __ LoadP(r8, MemOperand(r7)); // read next parameter + __ addi(r7, r7, Operand(kPointerSize)); + __ LoadP(r0, MemOperand(r8)); // dereference handle + __ push(r0); // push parameter + __ bind(&entry); + __ cmp(r7, r5); + __ bne(&loop); + + // Initialize all JavaScript callee-saved registers, since they will be seen + // by the garbage collector as part of handlers. + __ LoadRoot(r7, Heap::kUndefinedValueRootIndex); + __ mr(r14, r7); + __ mr(r15, r7); + __ mr(r16, r7); + __ mr(r22, r7); // hmmm, possibly should be reassigned to r17 + + // Invoke the code and pass argc as r3. + __ mr(r3, r6); + if (is_construct) { + CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); + __ CallStub(&stub); + } else { + ParameterCount actual(r3); + __ InvokeFunction(r4, actual, CALL_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); + } + // Exit the JS frame and remove the parameters (except function), and + // return. + } + __ blr(); + + // r3: result +} + + +void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) { + Generate_JSEntryTrampolineHelper(masm, false); +} + + +void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) { + Generate_JSEntryTrampolineHelper(masm, true); +} + + +void Builtins::Generate_LazyCompile(MacroAssembler* masm) { + // Enter an internal frame. + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Preserve the function. + __ push(r4); + // Push call kind information. + __ push(r8); + + // Push the function on the stack as the argument to the runtime function. + __ push(r4); + __ CallRuntime(Runtime::kLazyCompile, 1); + // Calculate the entry point. + __ addi(r5, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); + + // Restore call kind information. + __ pop(r8); + // Restore saved function. + __ pop(r4); + + // Tear down internal frame. + } + + // Do a tail-call of the compiled function. + __ Jump(r5); +} + + +void Builtins::Generate_LazyRecompile(MacroAssembler* masm) { + // Enter an internal frame. + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Preserve the function. + __ push(r4); + // Push call kind information. + __ push(r8); + + // Push the function on the stack as the argument to the runtime function. + __ push(r4); + __ CallRuntime(Runtime::kLazyRecompile, 1); + // Calculate the entry point. + __ addi(r5, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); + + // Restore call kind information. + __ pop(r8); + // Restore saved function. + __ pop(r4); + + // Tear down internal frame. + } + + // Do a tail-call of the compiled function. + __ Jump(r5); +} + + +static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm, + Deoptimizer::BailoutType type) { + { + FrameScope scope(masm, StackFrame::INTERNAL); + // Pass the function and deoptimization type to the runtime system. + __ LoadSmiLiteral(r3, Smi::FromInt(static_cast(type))); + __ push(r3); + __ CallRuntime(Runtime::kNotifyDeoptimized, 1); + } + + // Get the full codegen state from the stack and untag it -> r9. + __ LoadP(r9, MemOperand(sp, 0 * kPointerSize)); + __ SmiUntag(r9); + // Switch on the state. + Label with_tos_register, unknown_state; + __ cmpi(r9, Operand(FullCodeGenerator::NO_REGISTERS)); + __ bne(&with_tos_register); + __ addi(sp, sp, Operand(1 * kPointerSize)); // Remove state. 
+ __ Ret(); + + __ bind(&with_tos_register); + __ LoadP(r3, MemOperand(sp, 1 * kPointerSize)); + __ cmpi(r9, Operand(FullCodeGenerator::TOS_REG)); + __ bne(&unknown_state); + __ addi(sp, sp, Operand(2 * kPointerSize)); // Remove state. + __ Ret(); + + __ bind(&unknown_state); + __ stop("no cases left"); +} + + +void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { + Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER); +} + + +void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) { + Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY); +} + + +void Builtins::Generate_NotifyOSR(MacroAssembler* masm) { + // For now, we are relying on the fact that Runtime::NotifyOSR + // doesn't do any garbage collection which allows us to save/restore + // the registers without worrying about which of them contain + // pointers. This seems a bit fragile. + __ mflr(r0); + RegList saved_regs = + (kJSCallerSaved | kCalleeSaved | r0.bit() | fp.bit()) & ~sp.bit(); + __ MultiPush(saved_regs); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ CallRuntime(Runtime::kNotifyOSR, 0); + } + __ MultiPop(saved_regs); + __ mtlr(r0); + __ Ret(); +} + + +void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { + // Lookup the function in the JavaScript frame and push it as an + // argument to the on-stack replacement function. + __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(r3); + __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1); + } + + // If the result was -1 it means that we couldn't optimize the + // function. Just return and continue in the unoptimized version. + Label skip; + __ CmpSmiLiteral(r3, Smi::FromInt(-1), r0); + __ bne(&skip); + __ Ret(); + + __ bind(&skip); + // Untag the AST id and push it on the stack. + __ SmiUntag(r3); + __ push(r3); + + // Generate the code for doing the frame-to-frame translation using + // the deoptimizer infrastructure. + Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR); + generator.Generate(); +} + + +void Builtins::Generate_FunctionCall(MacroAssembler* masm) { + // 1. Make sure we have at least one argument. + // r3: actual number of arguments + { Label done; + __ cmpi(r3, Operand::Zero()); + __ bne(&done); + __ LoadRoot(r5, Heap::kUndefinedValueRootIndex); + __ push(r5); + __ addi(r3, r3, Operand(1)); + __ bind(&done); + } + + // 2. Get the function to call (passed as receiver) from the stack, check + // if it is a function. + // r3: actual number of arguments + Label slow, non_function; + __ ShiftLeftImm(r4, r3, Operand(kPointerSizeLog2)); + __ add(r4, sp, r4); + __ LoadP(r4, MemOperand(r4)); + __ JumpIfSmi(r4, &non_function); + __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE); + __ bne(&slow); + + // 3a. Patch the first argument if necessary when calling a function. + // r3: actual number of arguments + // r4: function + Label shift_arguments; + __ li(r7, Operand(0, RelocInfo::NONE)); // indicate regular JS_FUNCTION + { Label convert_to_object, use_global_receiver, patch_receiver; + // Change context eagerly in case we need the global receiver. + __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset)); + + // Do not transform the receiver for strict mode functions. 
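// NOTE: illustrative sketch only, not part of the patch. The bit tests that
// follow read the compiler-hints word of the SharedFunctionInfo; on 32-bit
// builds that word is stored as a smi, so every logical hint bit sits one
// position higher in the raw word (hence the "+ kSmiTagSize" below), while
// 64-bit PPC reads it as a plain integer. The bit position used here is a
// stand-in chosen for the example.
#include <cstdint>

static const int kSmiTagSizeExample = 1;
static const int kStrictModeFunctionBit = 2;  // assumed bit position

static bool IsStrictModeFunction(uint32_t raw_hints, bool hints_are_smi_tagged) {
  int bit = kStrictModeFunctionBit + (hints_are_smi_tagged ? kSmiTagSizeExample : 0);
  return (raw_hints & (1u << bit)) != 0;
}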
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); + __ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kCompilerHintsOffset)); + __ TestBit(r6, +#if V8_TARGET_ARCH_PPC64 + SharedFunctionInfo::kStrictModeFunction, +#else + SharedFunctionInfo::kStrictModeFunction + kSmiTagSize, +#endif + r0); + __ bne(&shift_arguments, cr0); + + // Do not transform the receiver for native (Compilerhints already in r6). + __ TestBit(r6, +#if V8_TARGET_ARCH_PPC64 + SharedFunctionInfo::kNative, +#else + SharedFunctionInfo::kNative + kSmiTagSize, +#endif + r0); + __ bne(&shift_arguments, cr0); + + // Compute the receiver in non-strict mode. + __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2)); + __ add(r5, sp, ip); + __ LoadP(r5, MemOperand(r5, -kPointerSize)); + // r3: actual number of arguments + // r4: function + // r5: first argument + __ JumpIfSmi(r5, &convert_to_object); + + __ LoadRoot(r6, Heap::kUndefinedValueRootIndex); + __ cmp(r5, r6); + __ beq(&use_global_receiver); + __ LoadRoot(r6, Heap::kNullValueRootIndex); + __ cmp(r5, r6); + __ beq(&use_global_receiver); + + STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); + __ CompareObjectType(r5, r6, r6, FIRST_SPEC_OBJECT_TYPE); + __ bge(&shift_arguments); + + __ bind(&convert_to_object); + + { + // Enter an internal frame in order to preserve argument count. + FrameScope scope(masm, StackFrame::INTERNAL); + __ SmiTag(r3); + __ push(r3); + + __ push(r5); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ mr(r5, r3); + + __ pop(r3); + __ SmiUntag(r3); + + // Exit the internal frame. + } + + // Restore the function to r4, and the flag to r7. + __ ShiftLeftImm(r7, r3, Operand(kPointerSizeLog2)); + __ add(r7, sp, r7); + __ LoadP(r4, MemOperand(r7)); + __ li(r7, Operand(0, RelocInfo::NONE)); + __ b(&patch_receiver); + + // Use the global receiver object from the called function as the + // receiver. + __ bind(&use_global_receiver); + const int kGlobalIndex = + Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize; + __ LoadP(r5, FieldMemOperand(cp, kGlobalIndex)); + __ LoadP(r5, FieldMemOperand(r5, GlobalObject::kNativeContextOffset)); + __ LoadP(r5, FieldMemOperand(r5, kGlobalIndex)); + __ LoadP(r5, FieldMemOperand(r5, GlobalObject::kGlobalReceiverOffset)); + + __ bind(&patch_receiver); + __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2)); + __ add(r6, sp, ip); + __ StoreP(r5, MemOperand(r6, -kPointerSize)); + + __ b(&shift_arguments); + } + + // 3b. Check for function proxy. + __ bind(&slow); + __ li(r7, Operand(1, RelocInfo::NONE)); // indicate function proxy + __ cmpi(r5, Operand(JS_FUNCTION_PROXY_TYPE)); + __ beq(&shift_arguments); + __ bind(&non_function); + __ li(r7, Operand(2, RelocInfo::NONE)); // indicate non-function + + // 3c. Patch the first argument when calling a non-function. The + // CALL_NON_FUNCTION builtin expects the non-function callee as + // receiver, so overwrite the first argument which will ultimately + // become the receiver. + // r3: actual number of arguments + // r4: function + // r7: call type (0: JS function, 1: function proxy, 2: non-function) + __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2)); + __ add(r5, sp, ip); + __ StoreP(r4, MemOperand(r5, -kPointerSize)); + + // 4. Shift arguments and return address one slot down on the stack + // (overwriting the original receiver). Adjust argument count to make + // the original first argument the new receiver. 
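// NOTE: illustrative sketch only, not part of the patch. The shift loop that
// follows slides every stack slot one position towards the receiver slot,
// overwriting the original receiver, then pops the now-stale lowest word and
// decrements the argument count. A plain C++ array stands in for the stack;
// the stub guarantees argc >= 1 before reaching this point.
#include <cstddef>
#include <cstdint>

static size_t ShiftArgumentsDown(intptr_t* slots, size_t argc) {
  // slots[0] is the value at sp, slots[argc] is the receiver slot.
  for (size_t i = argc; i > 0; --i) {
    slots[i] = slots[i - 1];  // copy each word one slot towards the receiver
  }
  // The stub then pops one word (the stale copy at sp) and continues with
  // argc - 1 arguments.
  return argc - 1;
}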
+ // r3: actual number of arguments + // r4: function + // r7: call type (0: JS function, 1: function proxy, 2: non-function) + __ bind(&shift_arguments); + { Label loop; + // Calculate the copy start address (destination). Copy end address is sp. + __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2)); + __ add(r5, sp, ip); + + __ bind(&loop); + __ LoadP(ip, MemOperand(r5, -kPointerSize)); + __ StoreP(ip, MemOperand(r5)); + __ subi(r5, r5, Operand(kPointerSize)); + __ cmp(r5, sp); + __ bne(&loop); + // Adjust the actual number of arguments and remove the top element + // (which is a copy of the last argument). + __ subi(r3, r3, Operand(1)); + __ pop(); + } + + // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin, + // or a function proxy via CALL_FUNCTION_PROXY. + // r3: actual number of arguments + // r4: function + // r7: call type (0: JS function, 1: function proxy, 2: non-function) + { Label function, non_proxy; + __ cmpi(r7, Operand::Zero()); + __ beq(&function); + // Expected number of arguments is 0 for CALL_NON_FUNCTION. + __ li(r5, Operand(0, RelocInfo::NONE)); + __ SetCallKind(r8, CALL_AS_METHOD); + __ cmpi(r7, Operand(1)); + __ bne(&non_proxy); + + __ push(r4); // re-add proxy object as additional argument + __ addi(r3, r3, Operand(1)); + __ GetBuiltinEntry(r6, Builtins::CALL_FUNCTION_PROXY); + __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + RelocInfo::CODE_TARGET); + + __ bind(&non_proxy); + __ GetBuiltinEntry(r6, Builtins::CALL_NON_FUNCTION); + __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + RelocInfo::CODE_TARGET); + __ bind(&function); + } + + // 5b. Get the code to call from the function and check that the number of + // expected arguments matches what we're providing. If so, jump + // (tail-call) to the code in register edx without checking arguments. + // r3: actual number of arguments + // r4: function + __ LoadP(r6, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); + __ LoadWordArith(r5, + FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset)); +#if !defined(V8_TARGET_ARCH_PPC64) + __ SmiUntag(r5); +#endif + __ LoadP(r6, FieldMemOperand(r4, JSFunction::kCodeEntryOffset)); + __ SetCallKind(r8, CALL_AS_METHOD); + __ cmp(r5, r3); // Check formal and actual parameter counts. + Label skip; + __ beq(&skip); + __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + RelocInfo::CODE_TARGET); + + __ bind(&skip); + ParameterCount expected(0); + __ InvokeCode(r6, expected, expected, JUMP_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); +} + + +void Builtins::Generate_FunctionApply(MacroAssembler* masm) { + const int kIndexOffset = -5 * kPointerSize; + const int kLimitOffset = -4 * kPointerSize; + const int kArgsOffset = 2 * kPointerSize; + const int kRecvOffset = 3 * kPointerSize; + const int kFunctionOffset = 4 * kPointerSize; + + { + FrameScope frame_scope(masm, StackFrame::INTERNAL); + + __ LoadP(r3, MemOperand(fp, kFunctionOffset)); // get the function + __ push(r3); + __ LoadP(r3, MemOperand(fp, kArgsOffset)); // get the args array + __ push(r3); + __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION); + + // Check the stack for overflow. We are not trying to catch + // interruptions (e.g. debug break and preemption) here, so the "real stack + // limit" is checked. + Label okay; + __ LoadRoot(r5, Heap::kRealStackLimitRootIndex); + // Make r5 the space we have left. The stack might already be overflowed + // here which will cause r5 to become negative. 
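// NOTE: illustrative sketch only, not part of the patch. It spells out the
// overflow test generated just below: the remaining space is sp minus the
// "real" stack limit, which can already be negative, so the comparison
// against the byte size of the argument array must be signed (bgt).
#include <cstdint>

static bool EnoughStackSpace(uintptr_t sp, uintptr_t real_stack_limit,
                             intptr_t argument_count) {
  // May be negative if the stack is already overflowed, hence signed math.
  intptr_t space_left = static_cast<intptr_t>(sp) -
                        static_cast<intptr_t>(real_stack_limit);
  intptr_t bytes_needed =
      argument_count * static_cast<intptr_t>(sizeof(void*));
  return space_left > bytes_needed;  // mirrors the signed bgt(&okay)
}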
+ __ sub(r5, sp, r5); + // Check if the arguments will overflow the stack. + __ SmiToPtrArrayOffset(r0, r3); + __ cmp(r5, r0); + __ bgt(&okay); // Signed comparison. + + // Out of stack space. + __ LoadP(r4, MemOperand(fp, kFunctionOffset)); + __ push(r4); + __ push(r3); + __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION); + // End of stack check. + + // Push current limit and index. + __ bind(&okay); + __ push(r3); // limit + __ li(r4, Operand(0, RelocInfo::NONE)); // initial index + __ push(r4); + + // Get the receiver. + __ LoadP(r3, MemOperand(fp, kRecvOffset)); + + // Check that the function is a JS function (otherwise it must be a proxy). + Label push_receiver; + __ LoadP(r4, MemOperand(fp, kFunctionOffset)); + __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE); + __ bne(&push_receiver); + + // Change context eagerly to get the right global object if necessary. + __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset)); + // Load the shared function info while the function is still in r4. + __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); + + // Compute the receiver. + // Do not transform the receiver for strict mode functions. + Label call_to_object, use_global_receiver; + __ lwz(r5, FieldMemOperand(r5, SharedFunctionInfo::kCompilerHintsOffset)); + __ TestBit(r5, +#if V8_TARGET_ARCH_PPC64 + SharedFunctionInfo::kStrictModeFunction, +#else + SharedFunctionInfo::kStrictModeFunction + kSmiTagSize, +#endif + r0); + __ bne(&push_receiver, cr0); + + // Do not transform the receiver for strict mode functions. + __ TestBit(r5, +#if V8_TARGET_ARCH_PPC64 + SharedFunctionInfo::kNative, +#else + SharedFunctionInfo::kNative + kSmiTagSize, +#endif + r0); + __ bne(&push_receiver, cr0); + + // Compute the receiver in non-strict mode. + __ JumpIfSmi(r3, &call_to_object); + __ LoadRoot(r4, Heap::kNullValueRootIndex); + __ cmp(r3, r4); + __ beq(&use_global_receiver); + __ LoadRoot(r4, Heap::kUndefinedValueRootIndex); + __ cmp(r3, r4); + __ beq(&use_global_receiver); + + // Check if the receiver is already a JavaScript object. + // r3: receiver + STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); + __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE); + __ bge(&push_receiver); + + // Convert the receiver to a regular object. + // r3: receiver + __ bind(&call_to_object); + __ push(r3); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ b(&push_receiver); + + // Use the current global receiver object as the receiver. + __ bind(&use_global_receiver); + const int kGlobalOffset = + Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize; + __ LoadP(r3, FieldMemOperand(cp, kGlobalOffset)); + __ LoadP(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset)); + __ LoadP(r3, FieldMemOperand(r3, kGlobalOffset)); + __ LoadP(r3, FieldMemOperand(r3, GlobalObject::kGlobalReceiverOffset)); + + // Push the receiver. + // r3: receiver + __ bind(&push_receiver); + __ push(r3); + + // Copy all arguments from the array to the stack. + Label entry, loop; + __ LoadP(r3, MemOperand(fp, kIndexOffset)); + __ b(&entry); + + // Load the current argument from the arguments array and push it to the + // stack. + // r3: current argument index + __ bind(&loop); + __ LoadP(r4, MemOperand(fp, kArgsOffset)); + __ push(r4); + __ push(r3); + + // Call the runtime to access the property in the arguments array. + __ CallRuntime(Runtime::kGetProperty, 2); + __ push(r3); + + // Use inline caching to access the arguments. 
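// NOTE: illustrative sketch only, not part of the patch. It shows the shape
// of the Function.prototype.apply copy loop around this point: each
// iteration fetches args[index] with a property load (Runtime::kGetProperty
// in the stub), pushes the value, and bumps the smi index until it reaches
// the smi limit saved in the frame. The containers stand in for the stack
// and the arguments object.
#include <cstdint>
#include <vector>

static void PushApplyArguments(const std::vector<intptr_t>& args,
                               std::vector<intptr_t>* stack) {
  for (size_t index = 0, limit = args.size(); index < limit; ++index) {
    intptr_t value = args[index];  // stands in for the kGetProperty call
    stack->push_back(value);       // stands in for __ push(r3)
  }
}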
+ __ LoadP(r3, MemOperand(fp, kIndexOffset)); + __ AddSmiLiteral(r3, r3, Smi::FromInt(1), r0); + __ StoreP(r3, MemOperand(fp, kIndexOffset)); + + // Test if the copy loop has finished copying all the elements from the + // arguments object. + __ bind(&entry); + __ LoadP(r4, MemOperand(fp, kLimitOffset)); + __ cmp(r3, r4); + __ bne(&loop); + + // Invoke the function. + Label call_proxy; + ParameterCount actual(r3); + __ SmiUntag(r3); + __ LoadP(r4, MemOperand(fp, kFunctionOffset)); + __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE); + __ bne(&call_proxy); + __ InvokeFunction(r4, actual, CALL_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); + + frame_scope.GenerateLeaveFrame(); + __ addi(sp, sp, Operand(3 * kPointerSize)); + __ blr(); + + // Invoke the function proxy. + __ bind(&call_proxy); + __ push(r4); // add function proxy as last argument + __ addi(r3, r3, Operand(1)); + __ li(r5, Operand(0, RelocInfo::NONE)); + __ SetCallKind(r8, CALL_AS_METHOD); + __ GetBuiltinEntry(r6, Builtins::CALL_FUNCTION_PROXY); + __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + RelocInfo::CODE_TARGET); + + // Tear down the internal frame and remove function, receiver and args. + } + __ addi(sp, sp, Operand(3 * kPointerSize)); + __ blr(); +} + + +static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) { + __ SmiTag(r3); + __ LoadSmiLiteral(r7, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); + __ mflr(r0); + __ push(r0); + __ Push(fp, r7, r4, r3); + __ addi(fp, sp, Operand(3 * kPointerSize)); +} + + +static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- r3 : result being passed through + // ----------------------------------- + // Get the number of arguments passed (as a smi), tear down the frame and + // then tear down the parameters. + __ LoadP(r4, MemOperand(fp, -3 * kPointerSize)); + __ mr(sp, fp); + __ LoadP(fp, MemOperand(sp)); + __ LoadP(r0, MemOperand(sp, kPointerSize)); + __ mtlr(r0); + __ SmiToPtrArrayOffset(r0, r4); + __ add(sp, sp, r0); + __ addi(sp, sp, Operand(3 * kPointerSize)); // adjust for receiver + fp + lr +} + + +void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- r3 : actual number of arguments + // -- r4 : function (passed through to callee) + // -- r5 : expected number of arguments + // -- r6 : code entry to call + // -- r8 : call kind information + // ----------------------------------- + + Label invoke, dont_adapt_arguments; + + Label enough, too_few; + __ cmp(r3, r5); + __ blt(&too_few); + __ cmpi(r5, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel)); + __ beq(&dont_adapt_arguments); + + { // Enough parameters: actual >= expected + __ bind(&enough); + EnterArgumentsAdaptorFrame(masm); + + // Calculate copy start address into r3 and copy end address into r5. + // r3: actual number of arguments as a smi + // r4: function + // r5: expected number of arguments + // r6: code entry to call + __ SmiToPtrArrayOffset(r3, r3); + __ add(r3, r3, fp); + // adjust for return address and receiver + __ addi(r3, r3, Operand(2 * kPointerSize)); + __ ShiftLeftImm(r5, r5, Operand(kPointerSizeLog2)); + __ sub(r5, r3, r5); + + // Copy the arguments (including the receiver) to the new stack frame. + // r3: copy start address + // r4: function + // r5: copy end address + // r6: code entry to call + + Label copy; + __ bind(©); + __ LoadP(ip, MemOperand(r3, 0)); + __ push(ip); + __ cmp(r3, r5); // Compare before moving to next argument. 
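// NOTE: illustrative sketch only, not part of the patch. In the
// "actual >= expected" path of the adaptor below, the stub walks down from
// the receiver slot in the caller frame and re-pushes expected + 1 words
// (receiver plus the first expected arguments) into the adaptor frame;
// surplus actual arguments are simply abandoned and released when the
// adaptor frame is torn down.
#include <cstdint>
#include <vector>

static void CopyExpectedArguments(const intptr_t* caller_slots,
                                  size_t expected,
                                  std::vector<intptr_t>* adaptor_frame) {
  // caller_slots[0] is the receiver, caller_slots[i] the i-th argument
  // (higher indices live at lower stack addresses).
  for (size_t i = 0; i <= expected; ++i) {
    adaptor_frame->push_back(caller_slots[i]);  // receiver, then arg 1..expected
  }
}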
+ __ subi(r3, r3, Operand(kPointerSize)); + __ bne(©); + + __ b(&invoke); + } + + { // Too few parameters: Actual < expected + __ bind(&too_few); + EnterArgumentsAdaptorFrame(masm); + + // Calculate copy start address into r0 and copy end address is fp. + // r3: actual number of arguments as a smi + // r4: function + // r5: expected number of arguments + // r6: code entry to call + __ SmiToPtrArrayOffset(r3, r3); + __ add(r3, r3, fp); + + // Copy the arguments (including the receiver) to the new stack frame. + // r3: copy start address + // r4: function + // r5: expected number of arguments + // r6: code entry to call + Label copy; + __ bind(©); + // Adjust load for return address and receiver. + __ LoadP(ip, MemOperand(r3, 2 * kPointerSize)); + __ push(ip); + __ cmp(r3, fp); // Compare before moving to next argument. + __ subi(r3, r3, Operand(kPointerSize)); + __ bne(©); + + // Fill the remaining expected arguments with undefined. + // r4: function + // r5: expected number of arguments + // r6: code entry to call + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ ShiftLeftImm(r5, r5, Operand(kPointerSizeLog2)); + __ sub(r5, fp, r5); + __ subi(r5, r5, Operand(4 * kPointerSize)); // Adjust for frame. + + Label fill; + __ bind(&fill); + __ push(ip); + __ cmp(sp, r5); + __ bne(&fill); + } + + // Call the entry point. + __ bind(&invoke); + __ Call(r6); + + // Store offset of return address for deoptimizer. + masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset()); + + // Exit frame and return. + LeaveArgumentsAdaptorFrame(masm); + __ blr(); + + + // ------------------------------------------- + // Dont adapt arguments. + // ------------------------------------------- + __ bind(&dont_adapt_arguments); + __ Jump(r6); +} + + +#undef __ + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_PPC diff --git a/src/ppc/code-stubs-ppc.cc b/src/ppc/code-stubs-ppc.cc new file mode 100644 index 0000000..fe52630 --- /dev/null +++ b/src/ppc/code-stubs-ppc.cc @@ -0,0 +1,7530 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// +// Copyright IBM Corp. 2012, 2013. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "v8.h" +#if defined(V8_TARGET_ARCH_PPC) + +#include "bootstrapper.h" +#include "code-stubs.h" +#include "regexp-macro-assembler.h" +#include "ppc/regexp-macro-assembler-ppc.h" + +namespace v8 { +namespace internal { + + +#define __ ACCESS_MASM(masm) + +static void EmitIdenticalObjectComparison(MacroAssembler* masm, + Label* slow, + Condition cond, + bool never_nan_nan); +static void EmitSmiNonsmiComparison(MacroAssembler* masm, + Register lhs, + Register rhs, + Label* lhs_not_nan, + Label* slow, + bool strict); +static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, + Register lhs, + Register rhs); + + +// Check if the operand is a heap number. +static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand, + Register scratch1, Register scratch2, + Label* not_a_heap_number) { + __ LoadP(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset)); + __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex); + __ cmp(scratch1, scratch2); + __ bne(not_a_heap_number); +} + + +void ToNumberStub::Generate(MacroAssembler* masm) { + // The ToNumber stub takes one argument in eax. + Label check_heap_number, call_builtin; + __ JumpIfNotSmi(r3, &check_heap_number); + __ Ret(); + + __ bind(&check_heap_number); + EmitCheckForHeapNumber(masm, r3, r4, ip, &call_builtin); + __ Ret(); + + __ bind(&call_builtin); + __ push(r3); + __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION); +} + + +void FastNewClosureStub::Generate(MacroAssembler* masm) { + // Create a new closure from the given function info in new + // space. Set the context to the current context in cp. + Counters* counters = masm->isolate()->counters(); + + Label gc; + + // Pop the function info from the stack. + __ pop(r6); + + // Attempt to allocate new JSFunction in new space. + __ AllocateInNewSpace(JSFunction::kSize, + r3, + r4, + r5, + &gc, + TAG_OBJECT); + + __ IncrementCounter(counters->fast_new_closure_total(), 1, r9, r10); + + int map_index = (language_mode_ == CLASSIC_MODE) + ? Context::FUNCTION_MAP_INDEX + : Context::STRICT_MODE_FUNCTION_MAP_INDEX; + + // Compute the function map in the current native context and set that + // as the map of the allocated object. + __ LoadP(r5, + MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); + __ LoadP(r5, FieldMemOperand(r5, GlobalObject::kNativeContextOffset)); + __ LoadP(r8, MemOperand(r5, Context::SlotOffset(map_index))); + __ StoreP(r8, FieldMemOperand(r3, HeapObject::kMapOffset), r0); + + // Initialize the rest of the function. We don't have to update the + // write barrier because the allocated object is in new space. 
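// NOTE: illustrative sketch only, not part of the patch. It captures the
// reasoning behind "no write barrier needed" in the closure stub below: a
// generational barrier only records stores that create an old-to-new
// pointer, and an object that was itself just allocated in new space can
// never be the host of such a store. The range query is a simplified
// stand-in for the heap's page checks.
#include <cstdint>

struct NewSpaceRange { uintptr_t start; uintptr_t end; };

static bool InNewSpace(const NewSpaceRange& range, const void* p) {
  uintptr_t a = reinterpret_cast<uintptr_t>(p);
  return a >= range.start && a < range.end;
}

static bool StoreNeedsWriteBarrier(const NewSpaceRange& new_space,
                                   const void* host_object,
                                   const void* stored_value) {
  // Only stores that create an old-to-new pointer must be recorded.
  return !InNewSpace(new_space, host_object) &&
         InNewSpace(new_space, stored_value);
}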
+ __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex); + __ LoadRoot(r8, Heap::kTheHoleValueRootIndex); + __ StoreP(r4, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0); + __ StoreP(r4, FieldMemOperand(r3, JSObject::kElementsOffset), r0); + __ StoreP(r8, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset), + r0); + __ StoreP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset), r0); + __ StoreP(cp, FieldMemOperand(r3, JSFunction::kContextOffset), r0); + __ StoreP(r4, FieldMemOperand(r3, JSFunction::kLiteralsOffset), r0); + + // Initialize the code pointer in the function to be the one + // found in the shared function info object. + // But first check if there is an optimized version for our context. + Label check_optimized; + Label install_unoptimized; + if (FLAG_cache_optimized_code) { + __ LoadP(r4, + FieldMemOperand(r6, SharedFunctionInfo::kOptimizedCodeMapOffset)); + __ cmpi(r4, Operand::Zero()); + __ bne(&check_optimized); + } + __ bind(&install_unoptimized); + __ LoadRoot(r7, Heap::kUndefinedValueRootIndex); + __ StoreP(r7, FieldMemOperand(r3, JSFunction::kNextFunctionLinkOffset), r0); + __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kCodeOffset)); + __ addi(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ StoreP(r6, FieldMemOperand(r3, JSFunction::kCodeEntryOffset), r0); + + // Return result. The argument function info has been popped already. + __ Ret(); + + __ bind(&check_optimized); + + __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, r9, r10); + + // r5 holds native context, r4 points to fixed array of 3-element entries + // (native context, optimized code, literals). + // The optimized code map must never be empty, so check the first elements. + Label install_optimized; + // Speculatively move code object into r7 + __ LoadP(r7, FieldMemOperand(r4, FixedArray::kHeaderSize + kPointerSize)); + __ LoadP(r8, FieldMemOperand(r4, FixedArray::kHeaderSize)); + __ cmp(r5, r8); + __ beq(&install_optimized); + + // Iterate through the rest of map backwards. r7 holds an index as a Smi. + Label loop; + __ LoadP(r7, FieldMemOperand(r4, FixedArray::kLengthOffset)); + __ bind(&loop); + // Do not double check first entry. + + __ CmpSmiLiteral(r7, Smi::FromInt(SharedFunctionInfo::kEntryLength), r0); + __ beq(&install_unoptimized); + // Skip an entry. + __ SubSmiLiteral(r7, r7, Smi::FromInt(SharedFunctionInfo::kEntryLength), r0); + __ addi(r8, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ SmiToPtrArrayOffset(r9, r7); + __ LoadPX(r8, MemOperand(r8, r9)); + __ cmp(r5, r8); + __ bne(&loop); + // Hit: fetch the optimized code. + // TODO(penguin): potential to use x-form for this sequence + __ addi(r8, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ SmiToPtrArrayOffset(r9, r7); + __ add(r8, r8, r9); + __ LoadPU(r7, MemOperand(r8, kPointerSize)); + + __ bind(&install_optimized); + __ IncrementCounter(counters->fast_new_closure_install_optimized(), + 1, r9, r10); + + // TODO(fschneider): Idea: store proper code pointers in the map and either + // unmangle them on marking or do nothing as the whole map is discarded on + // major GC anyway. + __ addi(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ StoreP(r7, FieldMemOperand(r3, JSFunction::kCodeEntryOffset), r0); + + // Now link a function into a list of optimized functions. 
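// NOTE: illustrative sketch only, not part of the patch. The optimized code
// map probed above is a FixedArray of (native context, optimized code,
// literals) triples; the stub scans it from the back, comparing each
// context slot against the current native context, and installs the
// unoptimized code when it reaches the front without a hit. The struct
// layout below is an assumption that mirrors that triple.
#include <cstddef>
#include <vector>

struct CodeMapEntry {
  const void* native_context;
  const void* code;
  const void* literals;
};

static const void* LookupOptimizedCode(const std::vector<CodeMapEntry>& map,
                                       const void* native_context) {
  for (size_t i = map.size(); i > 0; --i) {  // scan backwards, as the stub does
    if (map[i - 1].native_context == native_context) {
      return map[i - 1].code;                // hit: install the optimized code
    }
  }
  return NULL;  // miss: fall back to the code in the shared function info
}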
+ __ LoadP(r7, ContextOperand(r5, Context::OPTIMIZED_FUNCTIONS_LIST)); + + __ StoreP(r7, FieldMemOperand(r3, JSFunction::kNextFunctionLinkOffset), r0); + // No need for write barrier as JSFunction (eax) is in the new space. + + __ StoreP(r3, ContextOperand(r5, Context::OPTIMIZED_FUNCTIONS_LIST), r0); + // Store JSFunction (eax) into edx before issuing write barrier as + // it clobbers all the registers passed. + __ mr(r7, r3); + __ RecordWriteContextSlot( + r5, + Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST), + r7, + r4, + kLRHasNotBeenSaved, + kDontSaveFPRegs); + + // Return result. The argument function info has been popped already. + __ Ret(); + + // Create a new closure through the slower runtime call. + __ bind(&gc); + __ LoadRoot(r7, Heap::kFalseValueRootIndex); + __ Push(cp, r6, r7); + __ TailCallRuntime(Runtime::kNewClosure, 3, 1); +} + + +void FastNewContextStub::Generate(MacroAssembler* masm) { + // Try to allocate the context in new space. + Label gc; + int length = slots_ + Context::MIN_CONTEXT_SLOTS; + + // Attempt to allocate the context in new space. + __ AllocateInNewSpace(FixedArray::SizeFor(length), + r3, + r4, + r5, + &gc, + TAG_OBJECT); + + // Load the function from the stack. + __ LoadP(r6, MemOperand(sp, 0)); + + // Set up the object header. + __ LoadRoot(r4, Heap::kFunctionContextMapRootIndex); + __ LoadSmiLiteral(r5, Smi::FromInt(length)); + __ StoreP(r5, FieldMemOperand(r3, FixedArray::kLengthOffset), r0); + __ StoreP(r4, FieldMemOperand(r3, HeapObject::kMapOffset), r0); + + // Set up the fixed slots, copy the global object from the previous context. + __ LoadP(r5, + MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); + __ LoadSmiLiteral(r4, Smi::FromInt(0)); + __ StoreP(r6, MemOperand(r3, Context::SlotOffset(Context::CLOSURE_INDEX)), + r0); + __ StoreP(cp, MemOperand(r3, Context::SlotOffset(Context::PREVIOUS_INDEX)), + r0); + __ StoreP(r4, MemOperand(r3, Context::SlotOffset(Context::EXTENSION_INDEX)), + r0); + __ StoreP(r5, + MemOperand(r3, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)), + r0); + + // Initialize the rest of the slots to undefined. + __ LoadRoot(r4, Heap::kUndefinedValueRootIndex); + for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) { + __ StoreP(r4, MemOperand(r3, Context::SlotOffset(i)), r0); + } + + // Remove the on-stack argument and return. + __ mr(cp, r3); + __ pop(); + __ Ret(); + + // Need to collect. Call into runtime system. + __ bind(&gc); + __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1); +} + + +void FastNewBlockContextStub::Generate(MacroAssembler* masm) { + // Stack layout on entry: + // + // [sp]: function. + // [sp + kPointerSize]: serialized scope info + + // Try to allocate the context in new space. + Label gc; + int length = slots_ + Context::MIN_CONTEXT_SLOTS; + __ AllocateInNewSpace(FixedArray::SizeFor(length), + r3, r4, r5, &gc, TAG_OBJECT); + + // Load the function from the stack. + __ LoadP(r6, MemOperand(sp, 0)); + + // Load the serialized scope info from the stack. + __ LoadP(r4, MemOperand(sp, 1 * kPointerSize)); + + // Set up the object header. + __ LoadRoot(r5, Heap::kBlockContextMapRootIndex); + __ StoreP(r5, FieldMemOperand(r3, HeapObject::kMapOffset), r0); + __ LoadSmiLiteral(r5, Smi::FromInt(length)); + __ StoreP(r5, FieldMemOperand(r3, FixedArray::kLengthOffset), r0); + + // If this block context is nested in the native context we get a smi + // sentinel instead of a function. 
The block context should get the + // canonical empty function of the native context as its closure which + // we still have to look up. + Label after_sentinel; + __ JumpIfNotSmi(r6, &after_sentinel); + if (FLAG_debug_code) { + const char* message = "Expected 0 as a Smi sentinel"; + __ cmpi(r6, Operand::Zero()); + __ Assert(eq, message); + } + __ LoadP(r6, GlobalObjectOperand()); + __ LoadP(r6, FieldMemOperand(r6, GlobalObject::kNativeContextOffset)); + __ LoadP(r6, ContextOperand(r6, Context::CLOSURE_INDEX)); + __ bind(&after_sentinel); + + // Set up the fixed slots, copy the global object from the previous context. + __ LoadP(r5, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX)); + __ StoreP(r6, ContextOperand(r3, Context::CLOSURE_INDEX), r0); + __ StoreP(cp, ContextOperand(r3, Context::PREVIOUS_INDEX), r0); + __ StoreP(r4, ContextOperand(r3, Context::EXTENSION_INDEX), r0); + __ StoreP(r5, ContextOperand(r3, Context::GLOBAL_OBJECT_INDEX), r0); + + // Initialize the rest of the slots to the hole value. + __ LoadRoot(r4, Heap::kTheHoleValueRootIndex); + for (int i = 0; i < slots_; i++) { + __ StoreP(r4, ContextOperand(r3, i + Context::MIN_CONTEXT_SLOTS), r0); + } + + // Remove the on-stack argument and return. + __ mr(cp, r3); + __ addi(sp, sp, Operand(2 * kPointerSize)); + __ Ret(); + + // Need to collect. Call into runtime system. + __ bind(&gc); + __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1); +} + + +static void GenerateFastCloneShallowArrayCommon( + MacroAssembler* masm, + int length, + FastCloneShallowArrayStub::Mode mode, + Label* fail) { + // Registers on entry: + // + // r6: boilerplate literal array. + ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS); + + // All sizes here are multiples of kPointerSize. + int elements_size = 0; + if (length > 0) { + elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS + ? FixedDoubleArray::SizeFor(length) + : FixedArray::SizeFor(length); + } + int size = JSArray::kSize + elements_size; + + // Allocate both the JS array and the elements array in one big + // allocation. This avoids multiple limit checks. + __ AllocateInNewSpace(size, + r3, + r4, + r5, + fail, + TAG_OBJECT); + + // Copy the JS array part. + for (int i = 0; i < JSArray::kSize; i += kPointerSize) { + if ((i != JSArray::kElementsOffset) || (length == 0)) { + __ LoadP(r4, FieldMemOperand(r6, i)); + __ StoreP(r4, FieldMemOperand(r3, i), r0); + } + } + + if (length > 0) { + // Get hold of the elements array of the boilerplate and setup the + // elements pointer in the resulting object. + __ LoadP(r6, FieldMemOperand(r6, JSArray::kElementsOffset)); + __ addi(r5, r3, Operand(JSArray::kSize)); + __ StoreP(r5, FieldMemOperand(r3, JSArray::kElementsOffset), r0); + + // Copy the elements array. + ASSERT((elements_size % kPointerSize) == 0); + __ CopyFields(r5, r6, r4.bit(), elements_size / kPointerSize); + } +} + +void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { + // Stack layout on entry: + // + // [sp]: constant elements. + // [sp + kPointerSize]: literal index. + // [sp + (2 * kPointerSize)]: literals array. + + // Load boilerplate object into r3 and check if we need to create a + // boilerplate. 
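// NOTE: illustrative sketch only, not part of the patch. The boilerplate
// lookup generated next scales the smi literal index to a byte offset into
// the literals FixedArray, loads the element, and treats the undefined root
// as "no boilerplate yet", which sends the stub to the runtime slow case.
// The sentinel value here is a stand-in for the undefined root.
#include <cstdint>
#include <vector>

static const intptr_t kUndefinedSentinel = -1;  // stands in for the undefined root

static bool TryLoadBoilerplate(const std::vector<intptr_t>& literals,
                               size_t literal_index,
                               intptr_t* boilerplate_out) {
  intptr_t value = literals[literal_index];  // literals_base + index * kPointerSize
  if (value == kUndefinedSentinel) return false;  // no boilerplate yet: slow case
  *boilerplate_out = value;
  return true;
}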
+ Label slow_case; + __ LoadP(r6, MemOperand(sp, 2 * kPointerSize)); + __ LoadP(r3, MemOperand(sp, 1 * kPointerSize)); + __ addi(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + + __ mr(r0, r3); + __ SmiToPtrArrayOffset(r3, r3); + __ LoadPX(r6, MemOperand(r6, r3)); + __ mr(r3, r0); + + __ CompareRoot(r6, Heap::kUndefinedValueRootIndex); + __ beq(&slow_case); + + FastCloneShallowArrayStub::Mode mode = mode_; + if (mode == CLONE_ANY_ELEMENTS) { + Label double_elements, check_fast_elements; + __ LoadP(r3, FieldMemOperand(r6, JSArray::kElementsOffset)); + __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset)); + __ CompareRoot(r3, Heap::kFixedCOWArrayMapRootIndex); + __ bne(&check_fast_elements); + GenerateFastCloneShallowArrayCommon(masm, 0, + COPY_ON_WRITE_ELEMENTS, &slow_case); + // Return and remove the on-stack parameters. + __ addi(sp, sp, Operand(3 * kPointerSize)); + __ Ret(); + + __ bind(&check_fast_elements); + __ CompareRoot(r3, Heap::kFixedArrayMapRootIndex); + __ bne(&double_elements); + GenerateFastCloneShallowArrayCommon(masm, length_, + CLONE_ELEMENTS, &slow_case); + // Return and remove the on-stack parameters. + __ addi(sp, sp, Operand(3 * kPointerSize)); + __ Ret(); + + __ bind(&double_elements); + mode = CLONE_DOUBLE_ELEMENTS; + // Fall through to generate the code to handle double elements. + } + + if (FLAG_debug_code) { + const char* message; + Heap::RootListIndex expected_map_index; + if (mode == CLONE_ELEMENTS) { + message = "Expected (writable) fixed array"; + expected_map_index = Heap::kFixedArrayMapRootIndex; + } else if (mode == CLONE_DOUBLE_ELEMENTS) { + message = "Expected (writable) fixed double array"; + expected_map_index = Heap::kFixedDoubleArrayMapRootIndex; + } else { + ASSERT(mode == COPY_ON_WRITE_ELEMENTS); + message = "Expected copy-on-write fixed array"; + expected_map_index = Heap::kFixedCOWArrayMapRootIndex; + } + __ push(r6); + __ LoadP(r6, FieldMemOperand(r6, JSArray::kElementsOffset)); + __ LoadP(r6, FieldMemOperand(r6, HeapObject::kMapOffset)); + __ CompareRoot(r6, expected_map_index); + __ Assert(eq, message); + __ pop(r6); + } + + GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case); + + // Return and remove the on-stack parameters. + __ addi(sp, sp, Operand(3 * kPointerSize)); + __ Ret(); + + __ bind(&slow_case); + __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1); +} + + +void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) { + // Stack layout on entry: + // + // [sp]: object literal flags. + // [sp + kPointerSize]: constant properties. + // [sp + (2 * kPointerSize)]: literal index. + // [sp + (3 * kPointerSize)]: literals array. + + // Load boilerplate object into r3 and check if we need to create a + // boilerplate. + Label slow_case; + __ LoadP(r6, MemOperand(sp, 3 * kPointerSize)); + __ LoadP(r3, MemOperand(sp, 2 * kPointerSize)); + __ addi(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ mr(r0, r3); + __ SmiToPtrArrayOffset(r3, r3); + __ LoadPX(r6, MemOperand(r6, r3)); + __ mr(r3, r0); + + __ CompareRoot(r6, Heap::kUndefinedValueRootIndex); + __ beq(&slow_case); + + // Check that the boilerplate contains only fast properties and we can + // statically determine the instance size. 
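// NOTE: illustrative sketch only, not part of the patch. The object-clone
// fast path below first checks that the map's instance size (stored in
// words) matches the statically expected byte size shifted by
// kPointerSizeLog2, then copies the header and all in-object properties in
// pointer-sized chunks.
#include <cstddef>
#include <cstdint>

static bool CloneShallowObject(const intptr_t* boilerplate,
                               intptr_t* clone,
                               size_t expected_size_in_bytes,
                               size_t map_instance_size_in_words) {
  if (map_instance_size_in_words != expected_size_in_bytes / sizeof(intptr_t)) {
    return false;  // sizes don't match statically: take the runtime slow case
  }
  size_t words = expected_size_in_bytes / sizeof(intptr_t);
  for (size_t i = 0; i < words; ++i) {
    clone[i] = boilerplate[i];  // header plus in-object properties
  }
  return true;
}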
+ int size = JSObject::kHeaderSize + length_ * kPointerSize; + __ LoadP(r3, FieldMemOperand(r6, HeapObject::kMapOffset)); + __ lbz(r3, FieldMemOperand(r3, Map::kInstanceSizeOffset)); + __ cmpi(r3, Operand(size >> kPointerSizeLog2)); + __ bne(&slow_case); + + // Allocate the JS object and copy header together with all in-object + // properties from the boilerplate. + __ AllocateInNewSpace(size, r3, r4, r5, &slow_case, TAG_OBJECT); + for (int i = 0; i < size; i += kPointerSize) { + __ LoadP(r4, FieldMemOperand(r6, i)); + __ StoreP(r4, FieldMemOperand(r3, i), r0); + } + + // Return and remove the on-stack parameters. + __ addi(sp, sp, Operand(4 * kPointerSize)); + __ Ret(); + + __ bind(&slow_case); + __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1); +} + + +// Takes a Smi and converts to an IEEE 64 bit floating point value in two +// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and +// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a +// scratch register. Destroys the source register. No GC occurs during this +// stub so you don't have to set up the frame. +class ConvertToDoubleStub : public CodeStub { + public: + ConvertToDoubleStub(Register result_reg_1, + Register result_reg_2, + Register source_reg, + Register scratch_reg) + : result1_(result_reg_1), + result2_(result_reg_2), + source_(source_reg), + zeros_(scratch_reg) { } + + private: + Register result1_; + Register result2_; + Register source_; + Register zeros_; + + // Minor key encoding in 16 bits. + class ModeBits: public BitField {}; + class OpBits: public BitField {}; + + Major MajorKey() { return ConvertToDouble; } + int MinorKey() { + // Encode the parameters in a unique 16 bit value. + return result1_.code() + + (result2_.code() << 4) + + (source_.code() << 8) + + (zeros_.code() << 12); + } + + void Generate(MacroAssembler* masm); +}; + +void FloatingPointHelper::LoadSmis(MacroAssembler* masm, + Register scratch1, + Register scratch2) { + __ SmiToDoubleFPRegister(r3, d2, scratch1); + __ SmiToDoubleFPRegister(r4, d1, scratch1); +} + +// needs cleanup for extra parameters that are unused +void FloatingPointHelper::LoadOperands( + MacroAssembler* masm, + Register heap_number_map, + Register scratch1, + Register scratch2, + Label* slow) { + // Load right operand (r3) to d2 + LoadNumber(masm, r3, d2, heap_number_map, scratch1, scratch2, slow); + + // Load left operand (r4) to d1 + LoadNumber(masm, r4, d1, heap_number_map, scratch1, scratch2, slow); +} + +// needs cleanup for extra parameters that are unused +// also needs a scratch double register instead of d3 +void FloatingPointHelper::LoadNumber(MacroAssembler* masm, + Register object, + DwVfpRegister dst, + Register heap_number_map, + Register scratch1, + Register scratch2, + Label* not_number) { + __ AssertRootValue(heap_number_map, + Heap::kHeapNumberMapRootIndex, + "HeapNumberMap register clobbered."); + + Label is_smi, done; + + // Smi-check + __ UntagAndJumpIfSmi(scratch1, object, &is_smi); + // Heap number check + __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); + + // Handle loading a double from a heap number + // Load the double from tagged HeapNumber to double register. + __ lfd(dst, FieldMemOperand(object, HeapNumber::kValueOffset)); + __ b(&done); + + // Handle loading a double from a smi. + __ bind(&is_smi); + + // Convert untagged smi to double using FP instructions. 
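// NOTE: illustrative sketch only, not part of the patch. LoadNumber, shown
// just below, turns a tagged value into a double in one of two ways: a smi
// is untagged and converted with an integer-to-double conversion (fcfid in
// the stub), while a heap number already stores an IEEE-754 double that is
// simply loaded from its value field. The 32-bit smi scheme (tag size 1,
// smi tag 0) is assumed, and the payload pointer is a simplification.
#include <cstddef>
#include <cstdint>

static bool TaggedToDouble(intptr_t tagged,
                           const double* heap_number_payload,
                           double* out) {
  if ((tagged & 1) == 0) {                        // smi: tag bit is clear
    *out = static_cast<double>(tagged >> 1);      // untag, then int -> double
    return true;
  }
  if (heap_number_payload == NULL) return false;  // not a number: bail out
  *out = *heap_number_payload;                    // load from HeapNumber::kValueOffset
  return true;
}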
+ FloatingPointHelper::ConvertIntToDouble(masm, scratch1, dst); + + __ bind(&done); +} + + +void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm, + Register object, + Register dst, + Register heap_number_map, + Register scratch1, + Register scratch2, + Register scratch3, + DwVfpRegister double_scratch, + Label* not_number) { + __ AssertRootValue(heap_number_map, + Heap::kHeapNumberMapRootIndex, + "HeapNumberMap register clobbered."); + Label done; + Label not_in_int32_range; + + __ UntagAndJumpIfSmi(dst, object, &done); + __ LoadP(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset)); + __ cmp(scratch1, heap_number_map); + __ bne(not_number); + __ ConvertToInt32(object, + dst, + scratch1, + scratch2, + double_scratch, + ¬_in_int32_range); + __ b(&done); + + __ bind(¬_in_int32_range); + __ lwz(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); + __ lwz(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset)); + + __ EmitOutOfInt32RangeTruncate(dst, + scratch1, + scratch2, + scratch3); + __ bind(&done); +} + + +void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, + Register src, + DwVfpRegister double_dst) { + ASSERT(!src.is(r0)); + + __ subi(sp, sp, Operand(8)); // reserve one temporary double on the stack + + // sign-extend src to 64-bit and store it to temp double on the stack +#if V8_TARGET_ARCH_PPC64 + __ extsw(r0, src); + __ std(r0, MemOperand(sp, 0)); +#else + __ srawi(r0, src, 31); +#if __FLOAT_WORD_ORDER == __LITTLE_ENDIAN + __ stw(r0, MemOperand(sp, 4)); + __ stw(src, MemOperand(sp, 0)); +#else + __ stw(r0, MemOperand(sp, 0)); + __ stw(src, MemOperand(sp, 4)); +#endif +#endif + + // load into FPR + __ lfd(double_dst, MemOperand(sp, 0)); + + __ addi(sp, sp, Operand(8)); // restore stack + + // convert to double + __ fcfid(double_dst, double_dst); +} + + +void FloatingPointHelper::ConvertUnsignedIntToDouble(MacroAssembler* masm, + Register src, + DwVfpRegister double_dst) { + ASSERT(!src.is(r0)); + + __ subi(sp, sp, Operand(8)); // reserve one temporary double on the stack + + // zero-extend src to 64-bit and store it to temp double on the stack +#if V8_TARGET_ARCH_PPC64 + __ clrldi(r0, src, Operand(32)); + __ std(r0, MemOperand(sp, 0)); +#else + __ li(r0, Operand::Zero()); +#if __FLOAT_WORD_ORDER == __LITTLE_ENDIAN + __ stw(r0, MemOperand(sp, 4)); + __ stw(src, MemOperand(sp, 0)); +#else + __ stw(r0, MemOperand(sp, 0)); + __ stw(src, MemOperand(sp, 4)); +#endif +#endif + + // load into FPR + __ lfd(double_dst, MemOperand(sp, 0)); + + __ addi(sp, sp, Operand(8)); // restore stack + + // convert to double + __ fcfid(double_dst, double_dst); +} + +void FloatingPointHelper::ConvertIntToFloat(MacroAssembler* masm, + const DwVfpRegister dst, + const Register src, + const Register int_scratch) { + __ subi(sp, sp, Operand(8)); // reserve one temporary double on the stack + + // sign-extend src to 64-bit and store it to temp double on the stack +#if V8_TARGET_ARCH_PPC64 + __ extsw(int_scratch, src); + __ std(int_scratch, MemOperand(sp, 0)); +#else + __ srawi(int_scratch, src, 31); +#if __FLOAT_WORD_ORDER == __LITTLE_ENDIAN + __ stw(int_scratch, MemOperand(sp, 4)); + __ stw(src, MemOperand(sp, 0)); +#else + __ stw(int_scratch, MemOperand(sp, 0)); + __ stw(src, MemOperand(sp, 4)); +#endif +#endif + + // load sign-extended src into FPR + __ lfd(dst, MemOperand(sp, 0)); + + __ addi(sp, sp, Operand(8)); // restore stack + + __ fcfid(dst, dst); + __ frsp(dst, dst); +} + +void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, + 
Register object, + DwVfpRegister double_dst, + DwVfpRegister double_scratch, + Register heap_number_map, + Register scratch1, + Register scratch2, + Label* not_int32) { + ASSERT(!scratch1.is(object) && !scratch2.is(object)); + ASSERT(!scratch1.is(scratch2)); + ASSERT(!heap_number_map.is(object) && + !heap_number_map.is(scratch1) && + !heap_number_map.is(scratch2)); + + Label done, obj_is_not_smi; + + __ JumpIfNotSmi(object, &obj_is_not_smi); + __ SmiUntag(scratch1, object); + ConvertIntToDouble(masm, scratch1, double_dst); + __ b(&done); + + __ bind(&obj_is_not_smi); + __ AssertRootValue(heap_number_map, + Heap::kHeapNumberMapRootIndex, + "HeapNumberMap register clobbered."); + __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); + + // Load the double value. + __ lfd(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset)); + + __ EmitVFPTruncate(kRoundToZero, + scratch1, + double_dst, + scratch2, + double_scratch, + kCheckForInexactConversion); + + // Jump to not_int32 if the operation did not succeed. + __ bne(not_int32); + + __ bind(&done); +} + + +void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, + Register object, + Register dst, + Register heap_number_map, + Register scratch1, + Register scratch2, + Register scratch3, + DwVfpRegister double_scratch0, + DwVfpRegister double_scratch1, + Label* not_int32) { + ASSERT(!dst.is(object)); + ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object)); + ASSERT(!scratch1.is(scratch2) && + !scratch1.is(scratch3) && + !scratch2.is(scratch3)); + + Label done; + + __ UntagAndJumpIfSmi(dst, object, &done); + + __ AssertRootValue(heap_number_map, + Heap::kHeapNumberMapRootIndex, + "HeapNumberMap register clobbered."); + __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); + + // Load the double value. + __ lfd(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset)); + + __ EmitVFPTruncate(kRoundToZero, + dst, + double_scratch0, + scratch1, + double_scratch1, + kCheckForInexactConversion); + + // Jump to not_int32 if the operation did not succeed. + __ bne(not_int32); + + __ bind(&done); +} + + +void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, + Register src1, + Register src2, + Register dst, + Register scratch, + Label* not_int32) { + // Get exponent alone in scratch. + STATIC_ASSERT(HeapNumber::kExponentMask == 0x7ff00000u); + __ ExtractBitMask(scratch, src1, HeapNumber::kExponentMask); + + // Substract the bias from the exponent. + __ addi(scratch, scratch, Operand(-HeapNumber::kExponentBias)); + + // src1: higher (exponent) part of the double value. + // src2: lower (mantissa) part of the double value. + // scratch: unbiased exponent. + + // Fast cases. Check for obvious non 32-bit integer values. + // Negative exponent cannot yield 32-bit integers. + __ cmpi(scratch, Operand::Zero()); + __ blt(not_int32); + // Exponent greater than 31 cannot yield 32-bit integers. + // Also, a positive value with an exponent equal to 31 is outside of the + // signed 32-bit integer range. + // Another way to put it is that if (exponent - signbit) > 30 then the + // number cannot be represented as an int32. + Register tmp = dst; + __ ExtractSignBit32(tmp, src1); + __ sub(tmp, scratch, tmp); + __ cmpi(tmp, Operand(30)); + __ bgt(not_int32); + // - Check whether bits [21:0] in the mantissa are not null. + __ TestBitRange(src2, 21, 0, r0); + __ bne(not_int32, cr0); + + // Otherwise the exponent needs to be big enough to shift left all the + // non zero bits left. 
So we need the (30 - exponent) last bits of the + // 31 higher bits of the mantissa to be null. + // Because bits [21:0] are null, we can check instead that the + // (32 - exponent) last bits of the 32 higher bits of the mantissa are null. + + // Get the 32 higher bits of the mantissa in dst. + STATIC_ASSERT(HeapNumber::kMantissaBitsInTopWord == 20); + STATIC_ASSERT(HeapNumber::kNonMantissaBitsInTopWord == 12); + __ ExtractBitRange(dst, src2, 31, HeapNumber::kMantissaBitsInTopWord); + __ slwi(src1, src1, Operand(HeapNumber::kNonMantissaBitsInTopWord)); + __ orx(dst, dst, src1); + + // Create the mask and test the lower bits (of the higher bits). + __ subfic(scratch, scratch, Operand(32)); + __ li(src2, Operand(1)); + __ ShiftLeft(src1, src2, scratch); + __ addi(src1, src1, Operand(-1)); + __ and_(r0, dst, src1, SetRC); + __ bne(not_int32, cr0); +} + + +void FloatingPointHelper::CallCCodeForDoubleOperation( + MacroAssembler* masm, + Token::Value op, + Register heap_number_result, + Register scratch) { + // d1 - first arg, d2 - second arg + // d1 return value + + // Assert that heap_number_result is callee-saved. + // PowerPC doesn't preserve r8.. need to handle this specially + // We currently always use r8 to pass it. + ASSERT(heap_number_result.is(r8)); + __ push(r8); + + // Push the current return address before the C call. Return will be + // through pop() below. + __ mflr(r0); + __ push(r0); + __ PrepareCallCFunction(0, 2, scratch); + + { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction( + ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); + } + // load saved r8 value, restore lr + __ pop(r0); + __ mtlr(r0); + __ pop(r8); + + // Store answer in the overwritable heap number. Double returned in d1 + __ stfd(d1, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); + + // Place heap_number_result in r3 and return to the pushed return address. + __ mr(r3, heap_number_result); + __ blr(); +} + +// Handle the case where the lhs and rhs are the same object. +// Equality is almost reflexive (everything but NaN), so this is a test +// for "identity and not NaN". +static void EmitIdenticalObjectComparison(MacroAssembler* masm, + Label* slow, + Condition cond, + bool never_nan_nan) { + Label not_identical; + Label heap_number, return_equal; + __ cmp(r3, r4); + __ bne(¬_identical); + + // The two objects are identical. If we know that one of them isn't NaN then + // we now know they test equal. + if (cond != eq || !never_nan_nan) { + // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(), + // so we do the second best thing - test it ourselves. + // They are both equal and they are not both Smis so both of them are not + // Smis. If it's not a heap number, then return equal. + if (cond == lt || cond == gt) { + __ CompareObjectType(r3, r7, r7, FIRST_SPEC_OBJECT_TYPE); + __ bge(slow); + } else { + __ CompareObjectType(r3, r7, r7, HEAP_NUMBER_TYPE); + __ beq(&heap_number); + // Comparing JS objects with <=, >= is complicated. + if (cond != eq) { + __ cmpi(r7, Operand(FIRST_SPEC_OBJECT_TYPE)); + __ bge(slow); + // Normally here we fall through to return_equal, but undefined is + // special: (undefined == undefined) == true, but + // (undefined <= undefined) == false! See ECMAScript 11.8.5. + if (cond == le || cond == ge) { + __ cmpi(r7, Operand(ODDBALL_TYPE)); + __ bne(&return_equal); + __ LoadRoot(r5, Heap::kUndefinedValueRootIndex); + __ cmp(r3, r5); + __ bne(&return_equal); + if (cond == le) { + // undefined <= undefined should fail. 
+ __ li(r3, Operand(GREATER)); + } else { + // undefined >= undefined should fail. + __ li(r3, Operand(LESS)); + } + __ Ret(); + } + } + } + } + + __ bind(&return_equal); + if (cond == lt) { + __ li(r3, Operand(GREATER)); // Things aren't less than themselves. + } else if (cond == gt) { + __ li(r3, Operand(LESS)); // Things aren't greater than themselves. + } else { + __ li(r3, Operand(EQUAL)); // Things are <=, >=, ==, === themselves. + } + __ Ret(); + + if (cond != eq || !never_nan_nan) { + // For less and greater we don't have to check for NaN since the result of + // x < x is false regardless. For the others here is some code to check + // for NaN. + if (cond != lt && cond != gt) { + __ bind(&heap_number); + // It is a heap number, so return non-equal if it's NaN and equal if it's + // not NaN. + + // The representation of NaN values has all exponent bits (52..62) set, + // and not all mantissa bits (0..51) clear. + // Read top bits of double representation (second word of value). + __ lwz(r5, FieldMemOperand(r3, HeapNumber::kExponentOffset)); + // Test that exponent bits are all set. + STATIC_ASSERT(HeapNumber::kExponentMask == 0x7ff00000u); + __ ExtractBitMask(r6, r5, HeapNumber::kExponentMask); + __ cmpli(r6, Operand(0x7ff)); + __ bne(&return_equal); + + // Shift out flag and all exponent bits, retaining only mantissa. + __ slwi(r5, r5, Operand(HeapNumber::kNonMantissaBitsInTopWord)); + // Or with all low-bits of mantissa. + __ lwz(r6, FieldMemOperand(r3, HeapNumber::kMantissaOffset)); + __ orx(r3, r6, r5); + __ cmpi(r3, Operand::Zero()); + // For equal we already have the right value in r3: Return zero (equal) + // if all bits in mantissa are zero (it's an Infinity) and non-zero if + // not (it's a NaN). For <= and >= we need to load r0 with the failing + // value if it's a NaN. + if (cond != eq) { + Label not_equal; + __ bne(¬_equal); + // All-zero means Infinity means equal. + __ Ret(); + __ bind(¬_equal); + if (cond == le) { + __ li(r3, Operand(GREATER)); // NaN <= NaN should fail. + } else { + __ li(r3, Operand(LESS)); // NaN >= NaN should fail. + } + } + __ Ret(); + } + // No fall through here. + } + + __ bind(¬_identical); +} + + +// See comment at call site. +static void EmitSmiNonsmiComparison(MacroAssembler* masm, + Register lhs, + Register rhs, + Label* lhs_not_nan, + Label* slow, + bool strict) { + ASSERT((lhs.is(r3) && rhs.is(r4)) || + (lhs.is(r4) && rhs.is(r3))); + + Label rhs_is_smi; + __ JumpIfSmi(rhs, &rhs_is_smi); + + // Lhs is a Smi. Check whether the rhs is a heap number. + __ CompareObjectType(rhs, r6, r7, HEAP_NUMBER_TYPE); + if (strict) { + // If rhs is not a number and lhs is a Smi then strict equality cannot + // succeed. Return non-equal + // If rhs is r3 then there is already a non zero value in it. + Label skip; + __ beq(&skip); + if (!rhs.is(r3)) { + __ mov(r3, Operand(NOT_EQUAL)); + } + __ Ret(); + __ bind(&skip); + } else { + // Smi compared non-strictly with a non-Smi non-heap-number. Call + // the runtime. + __ bne(slow); + } + + // Lhs is a smi, rhs is a number. + // Convert lhs to a double in d7. + __ SmiToDoubleFPRegister(lhs, d7, r10); + // Load the double from rhs, tagged HeapNumber r3, to d6. + __ lfd(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset)); + + // We now have both loaded as doubles but we can skip the lhs nan check + // since it's a smi. + __ b(lhs_not_nan); + + __ bind(&rhs_is_smi); + // Rhs is a smi. Check whether the non-smi lhs is a heap number. 
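// NOTE: illustrative sketch only, not part of the patch. It restates the
// NaN test used by the identical-object comparison above, done on the raw
// IEEE-754 bits: a double is NaN exactly when all eleven exponent bits are
// set and the 52-bit mantissa is non-zero; the stub checks the exponent in
// the high word and then folds in the remaining mantissa bits.
#include <cstdint>
#include <cstring>

static bool IsNaNBits(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  uint64_t exponent = (bits >> 52) & 0x7FF;                          // bits 52..62
  uint64_t mantissa = bits & ((static_cast<uint64_t>(1) << 52) - 1); // bits 0..51
  return exponent == 0x7FF && mantissa != 0;
}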
+ __ CompareObjectType(lhs, r7, r7, HEAP_NUMBER_TYPE); + if (strict) { + // If lhs is not a number and rhs is a smi then strict equality cannot + // succeed. Return non-equal. + // If lhs is r3 then there is already a non zero value in it. + Label skip; + __ beq(&skip); + if (!lhs.is(r3)) { + __ mov(r3, Operand(NOT_EQUAL)); + } + __ Ret(); + __ bind(&skip); + } else { + // Smi compared non-strictly with a non-smi non-heap-number. Call + // the runtime. + __ bne(slow); + } + + // Rhs is a smi, lhs is a heap number. + // Load the double from lhs, tagged HeapNumber r4, to d7. + __ lfd(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset)); + // Convert rhs to a double in d6. + __ SmiToDoubleFPRegister(rhs, d6, r10); + // Fall through to both_loaded_as_doubles. +} + +// See comment at call site. +static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, + Register lhs, + Register rhs) { + ASSERT((lhs.is(r3) && rhs.is(r4)) || + (lhs.is(r4) && rhs.is(r3))); + + // If either operand is a JS object or an oddball value, then they are + // not equal since their pointers are different. + // There is no test for undetectability in strict equality. + STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); + Label first_non_object; + // Get the type of the first operand into r5 and compare it with + // FIRST_SPEC_OBJECT_TYPE. + __ CompareObjectType(rhs, r5, r5, FIRST_SPEC_OBJECT_TYPE); + __ blt(&first_non_object); + + // Return non-zero (r3 is not zero) + Label return_not_equal; + __ bind(&return_not_equal); + __ Ret(); + + __ bind(&first_non_object); + // Check for oddballs: true, false, null, undefined. + __ cmpi(r5, Operand(ODDBALL_TYPE)); + __ beq(&return_not_equal); + + __ CompareObjectType(lhs, r6, r6, FIRST_SPEC_OBJECT_TYPE); + __ bge(&return_not_equal); + + // Check for oddballs: true, false, null, undefined. + __ cmpi(r6, Operand(ODDBALL_TYPE)); + __ beq(&return_not_equal); + + // Now that we have the types we might as well check for symbol-symbol. + // Ensure that no non-strings have the symbol bit set. + STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask); + STATIC_ASSERT(kSymbolTag != 0); + __ and_(r5, r5, r6); + __ andi(r0, r5, Operand(kIsSymbolMask)); + __ bne(&return_not_equal, cr0); +} + + +static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, + Register lhs, + Register rhs, + Label* both_loaded_as_doubles, + Label* not_heap_numbers, + Label* slow) { + ASSERT((lhs.is(r3) && rhs.is(r4)) || + (lhs.is(r4) && rhs.is(r3))); + + __ CompareObjectType(rhs, r6, r5, HEAP_NUMBER_TYPE); + __ bne(not_heap_numbers); + __ LoadP(r5, FieldMemOperand(lhs, HeapObject::kMapOffset)); + __ cmp(r5, r6); + __ bne(slow); // First was a heap number, second wasn't. Go slow case. + + // Both are heap numbers. Load them up then jump to the code we have + // for that. + __ lfd(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset)); + __ lfd(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset)); + + __ b(both_loaded_as_doubles); +} + + +// Fast negative check for symbol-to-symbol equality. +static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm, + Register lhs, + Register rhs, + Label* possible_strings, + Label* not_both_strings) { + ASSERT((lhs.is(r3) && rhs.is(r4)) || + (lhs.is(r4) && rhs.is(r3))); + + // r5 is object type of rhs. + // Ensure that no non-strings have the symbol bit set. 
+ Label object_test; + STATIC_ASSERT(kSymbolTag != 0); + __ andi(r0, r5, Operand(kIsNotStringMask)); + __ bne(&object_test, cr0); + __ andi(r0, r5, Operand(kIsSymbolMask)); + __ beq(possible_strings, cr0); + __ CompareObjectType(lhs, r6, r6, FIRST_NONSTRING_TYPE); + __ bge(not_both_strings); + __ andi(r0, r6, Operand(kIsSymbolMask)); + __ beq(possible_strings, cr0); + + // Both are symbols. We already checked they weren't the same pointer + // so they are not equal. + __ li(r3, Operand(NOT_EQUAL)); + __ Ret(); + + __ bind(&object_test); + __ cmpi(r5, Operand(FIRST_SPEC_OBJECT_TYPE)); + __ blt(not_both_strings); + __ CompareObjectType(lhs, r5, r6, FIRST_SPEC_OBJECT_TYPE); + __ blt(not_both_strings); + // If both objects are undetectable, they are equal. Otherwise, they + // are not equal, since they are different objects and an object is not + // equal to undefined. + __ LoadP(r6, FieldMemOperand(rhs, HeapObject::kMapOffset)); + __ lbz(r5, FieldMemOperand(r5, Map::kBitFieldOffset)); + __ lbz(r6, FieldMemOperand(r6, Map::kBitFieldOffset)); + __ and_(r3, r5, r6); + __ andi(r3, r3, Operand(1 << Map::kIsUndetectable)); + __ xori(r3, r3, Operand(1 << Map::kIsUndetectable)); + __ Ret(); +} + + +void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, + Register object, + Register result, + Register scratch1, + Register scratch2, + Register scratch3, + bool object_is_smi, + Label* not_found) { + // Use of registers. Register result is used as a temporary. + Register number_string_cache = result; + Register mask = scratch3; + + // Load the number string cache. + __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); + + // Make the hash mask from the length of the number string cache. It + // contains two elements (number and string) for each cache entry. + __ LoadP(mask, FieldMemOperand(number_string_cache, + FixedArray::kLengthOffset)); + // Divide length by two (length is a smi). + __ ShiftRightArithImm(mask, mask, kSmiTagSize + kSmiShiftSize + 1); + __ subi(mask, mask, Operand(1)); // Make mask. + + // Calculate the entry in the number string cache. The hash value in the + // number string cache for smis is just the smi value, and the hash for + // doubles is the xor of the upper and lower words. See + // Heap::GetNumberStringCache. + Isolate* isolate = masm->isolate(); + Label is_smi; + Label load_result_from_cache; + if (!object_is_smi) { + __ JumpIfSmi(object, &is_smi); + + __ CheckMap(object, + scratch1, + Heap::kHeapNumberMapRootIndex, + not_found, + DONT_DO_SMI_CHECK); + + STATIC_ASSERT(8 == kDoubleSize); + __ lwz(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); + __ lwz(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset)); + __ xor_(scratch1, scratch1, scratch2); + __ and_(scratch1, scratch1, mask); + + // Calculate address of entry in string cache: each entry consists + // of two pointer sized fields. + __ ShiftLeftImm(scratch1, scratch1, Operand(kPointerSizeLog2 + 1)); + __ add(scratch1, number_string_cache, scratch1); + + Register probe = mask; + __ LoadP(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize)); + __ JumpIfSmi(probe, not_found); + __ lfd(d0, FieldMemOperand(object, HeapNumber::kValueOffset)); + __ lfd(d1, FieldMemOperand(probe, HeapNumber::kValueOffset)); + __ fcmpu(d0, d1); + __ bne(not_found); // The cache did not contain this value. 
+ __ b(&load_result_from_cache); + } + + __ bind(&is_smi); + Register scratch = scratch1; + __ SmiUntag(scratch, object); + __ and_(scratch, mask, scratch); + // Calculate address of entry in string cache: each entry consists + // of two pointer sized fields. + __ ShiftLeftImm(scratch, scratch, Operand(kPointerSizeLog2 + 1)); + __ add(scratch, number_string_cache, scratch); + + // Check if the entry is the smi we are looking for. + Register probe = mask; + __ LoadP(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize)); + __ cmp(object, probe); + __ bne(not_found); + + // Get the result from the cache. + __ bind(&load_result_from_cache); + __ LoadP(result, + FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize)); + __ IncrementCounter(isolate->counters()->number_to_string_native(), + 1, + scratch1, + scratch2); +} + + +void NumberToStringStub::Generate(MacroAssembler* masm) { + Label runtime; + + __ LoadP(r4, MemOperand(sp, 0)); + + // Generate code to lookup number in the number string cache. + GenerateLookupNumberStringCache(masm, r4, r3, r5, r6, r7, false, &runtime); + __ addi(sp, sp, Operand(1 * kPointerSize)); + __ Ret(); + + __ bind(&runtime); + // Handle number to string in the runtime system if not found in the cache. + __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1); +} + + +// On entry lhs_ and rhs_ are the values to be compared. +// On exit r3 is 0, positive or negative to indicate the result of +// the comparison. +void CompareStub::Generate(MacroAssembler* masm) { + ASSERT((lhs_.is(r3) && rhs_.is(r4)) || + (lhs_.is(r4) && rhs_.is(r3))); + + Label slow; // Call builtin. + Label not_smis, both_loaded_as_doubles, lhs_not_nan; + + if (include_smi_compare_) { + Label not_two_smis, smi_done; + __ orx(r5, r4, r3); + __ JumpIfNotSmi(r5, ¬_two_smis); + __ SmiUntag(r4); + __ SmiUntag(r3); + __ sub(r3, r4, r3); + __ Ret(); + __ bind(¬_two_smis); + } else if (FLAG_debug_code) { + __ orx(r5, r4, r3); + STATIC_ASSERT(kSmiTagMask < 0x8000); + __ andi(r0, r5, Operand(kSmiTagMask)); + __ Assert(ne, "CompareStub: unexpected smi operands.", cr0); + } + + // NOTICE! This code is only reached after a smi-fast-case check, so + // it is certain that at least one operand isn't a smi. + + // Handle the case where the objects are identical. Either returns the answer + // or goes to slow. Only falls through if the objects were not identical. + EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_); + + // If either is a Smi (we know that not both are), then they can only + // be strictly equal if the other is a HeapNumber. + STATIC_ASSERT(kSmiTag == 0); + ASSERT_EQ(0, Smi::FromInt(0)); + __ and_(r5, lhs_, rhs_); + __ JumpIfNotSmi(r5, ¬_smis); + // One operand is a smi. EmitSmiNonsmiComparison generates code that can: + // 1) Return the answer. + // 2) Go to slow. + // 3) Fall through to both_loaded_as_doubles. + // 4) Jump to lhs_not_nan. + // In cases 3 and 4 we have found out we were dealing with a number-number + // comparison. The double values of the numbers have been loaded + // into d7 and d6. 
+ EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_); + + __ bind(&both_loaded_as_doubles); + // The arguments have been converted to doubles and stored in d6 and d7 + Isolate* isolate = masm->isolate(); + __ bind(&lhs_not_nan); + Label no_nan; + __ fcmpu(d7, d6); + + Label nan, equal, less_than; + __ bunordered(&nan); + __ beq(&equal); + __ blt(&less_than); + __ li(r3, Operand(GREATER)); + __ Ret(); + __ bind(&equal); + __ li(r3, Operand(EQUAL)); + __ Ret(); + __ bind(&less_than); + __ li(r3, Operand(LESS)); + __ Ret(); + + __ bind(&nan); + // If one of the sides was a NaN then the v flag is set. Load r3 with + // whatever it takes to make the comparison fail, since comparisons with NaN + // always fail. + if (cc_ == lt || cc_ == le) { + __ li(r3, Operand(GREATER)); + } else { + __ li(r3, Operand(LESS)); + } + __ Ret(); + + __ bind(¬_smis); + // At this point we know we are dealing with two different objects, + // and neither of them is a Smi. The objects are in rhs_ and lhs_. + if (strict_) { + // This returns non-equal for some object types, or falls through if it + // was not lucky. + EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_); + } + + Label check_for_symbols; + Label flat_string_check; + // Check for heap-number-heap-number comparison. Can jump to slow case, + // or load both doubles into r3, r4, r5, r6 and jump to the code that handles + // that case. If the inputs are not doubles then jumps to check_for_symbols. + // In this case r5 will contain the type of rhs_. Never falls through. + EmitCheckForTwoHeapNumbers(masm, + lhs_, + rhs_, + &both_loaded_as_doubles, + &check_for_symbols, + &flat_string_check); + + __ bind(&check_for_symbols); + // In the strict case the EmitStrictTwoHeapObjectCompare already took care of + // symbols. + if (cc_ == eq && !strict_) { + // Returns an answer for two symbols or two detectable objects. + // Otherwise jumps to string case or not both strings case. + // Assumes that r5 is the type of rhs_ on entry. + EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow); + } + + // Check for both being sequential ASCII strings, and inline if that is the + // case. + __ bind(&flat_string_check); + + __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r5, r6, &slow); + + __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r5, r6); + if (cc_ == eq) { + StringCompareStub::GenerateFlatAsciiStringEquals(masm, + lhs_, + rhs_, + r5, + r6); + } else { + StringCompareStub::GenerateCompareFlatAsciiStrings(masm, + lhs_, + rhs_, + r5, + r6, + r7); + } + // Never falls through to here. + + __ bind(&slow); + + __ Push(lhs_, rhs_); + // Figure out which native to call and setup the arguments. + Builtins::JavaScript native; + if (cc_ == eq) { + native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS; + } else { + native = Builtins::COMPARE; + int ncr; // NaN compare result + if (cc_ == lt || cc_ == le) { + ncr = GREATER; + } else { + ASSERT(cc_ == gt || cc_ == ge); // remaining cases + ncr = LESS; + } + __ LoadSmiLiteral(r3, Smi::FromInt(ncr)); + __ push(r3); + } + + // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) + // tagged as a small integer. + __ InvokeBuiltin(native, JUMP_FUNCTION); +} + + +// The stub expects its argument in the tos_ register and returns its result in +// it, too: zero for false, and a non-zero value for true. +void ToBooleanStub::Generate(MacroAssembler* masm) { + // This stub overrides SometimesSetsUpAFrame() to return false. 
That means + // we cannot call anything that could cause a GC from this stub. + Label patch; + const Register map = r22.is(tos_) ? r10 : r22; + + // undefined -> false. + CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false); + + // Boolean -> its value. + CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false); + CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true); + + // 'null' -> false. + CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false); + + if (types_.Contains(SMI)) { + // Smis: 0 -> false, all other -> true + Label not_smi; + __ JumpIfNotSmi(tos_, ¬_smi); + // tos_ contains the correct return value already + __ Ret(); + __ bind(¬_smi); + } else if (types_.NeedsMap()) { + // If we need a map later and have a Smi -> patch. + __ JumpIfSmi(tos_, &patch); + } + + if (types_.NeedsMap()) { + __ LoadP(map, FieldMemOperand(tos_, HeapObject::kMapOffset)); + + if (types_.CanBeUndetectable()) { + Label not_undetectable; + __ lbz(ip, FieldMemOperand(map, Map::kBitFieldOffset)); + STATIC_ASSERT((1 << Map::kIsUndetectable) < 0x8000); + __ andi(r0, ip, Operand(1 << Map::kIsUndetectable)); + __ beq(¬_undetectable, cr0); + // Undetectable -> false. + __ li(tos_, Operand(0, RelocInfo::NONE)); + __ Ret(); + __ bind(¬_undetectable); + } + } + + if (types_.Contains(SPEC_OBJECT)) { + // Spec object -> true. + Label not_js_object; + __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE); + // tos_ contains the correct non-zero return value already. + __ blt(¬_js_object); + __ Ret(); + __ bind(¬_js_object); + } + + if (types_.Contains(STRING)) { + // String value -> false iff empty. + Label not_string; + __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE); + __ bge(¬_string); + __ LoadP(tos_, FieldMemOperand(tos_, String::kLengthOffset)); + __ Ret(); // the string length is OK as the return value + __ bind(¬_string); + } + + if (types_.Contains(HEAP_NUMBER)) { + // Heap number -> false iff +0, -0, or NaN. + Label not_heap_number, nan_or_zero; + __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); + __ bne(¬_heap_number); + + __ lfd(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset)); + __ li(r0, Operand::Zero()); + __ push(r0); +#if !V8_TARGET_ARCH_PPC64 + __ push(r0); +#endif + __ lfd(d2, MemOperand(sp, 0)); + __ addi(sp, sp, Operand(8)); + __ fcmpu(d1, d2); + // "tos_" is a register, and contains a non zero value by default. + // Hence we only need to overwrite "tos_" with zero to return false for + // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true. + __ bunordered(&nan_or_zero); + __ beq(&nan_or_zero); + __ Ret(); + + __ bind(&nan_or_zero); + __ li(tos_, Operand::Zero()); + __ Ret(); + + __ bind(¬_heap_number); + } + + __ bind(&patch); + GenerateTypeTransition(masm); +} + + +void ToBooleanStub::CheckOddball(MacroAssembler* masm, + Type type, + Heap::RootListIndex value, + bool result) { + if (types_.Contains(type)) { + // If we see an expected oddball, return its ToBoolean value tos_. + Label different_value; + __ LoadRoot(ip, value); + __ cmp(tos_, ip); + __ bne(&different_value); + // The value of a root is never NULL, so we can avoid loading a non-null + // value into tos_ when we want to return 'true'. 
+ if (!result) { + __ li(tos_, Operand(0, RelocInfo::NONE)); + } + // Intel has some logic here not present on ARM + // unclear if it's needed or not + __ Ret(); + __ bind(&different_value); + } +} + + +void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) { + if (!tos_.is(r6)) { + __ mr(r6, tos_); + } + __ LoadSmiLiteral(r5, Smi::FromInt(tos_.code())); + __ LoadSmiLiteral(r4, Smi::FromInt(types_.ToByte())); + __ Push(r6, r5, r4); + // Patch the caller to an appropriate specialized stub and return the + // operation result to the caller of the stub. + __ TailCallExternalReference( + ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()), + 3, + 1); +} + + +void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { + // We don't allow a GC during a store buffer overflow so there is no need to + // store the registers in any particular way, but we do have to store and + // restore them. + __ mflr(r0); + __ MultiPush(kJSCallerSaved | r0.bit()); + if (save_doubles_ == kSaveFPRegs) { + const int kNumRegs = DwVfpRegister::kNumVolatileRegisters; + __ subi(sp, sp, Operand(kDoubleSize * kNumRegs)); + for (int i = 0; i < kNumRegs; i++) { + DwVfpRegister reg = DwVfpRegister::from_code(i); + __ stfd(reg, MemOperand(sp, i * kDoubleSize)); + } + } + const int argument_count = 1; + const int fp_argument_count = 0; + const Register scratch = r4; + + AllowExternalCallThatCantCauseGC scope(masm); + __ PrepareCallCFunction(argument_count, fp_argument_count, scratch); + __ mov(r3, Operand(ExternalReference::isolate_address())); + __ CallCFunction( + ExternalReference::store_buffer_overflow_function(masm->isolate()), + argument_count); + if (save_doubles_ == kSaveFPRegs) { + const int kNumRegs = DwVfpRegister::kNumVolatileRegisters; + for (int i = 0; i < kNumRegs; i++) { + DwVfpRegister reg = DwVfpRegister::from_code(i); + __ lfd(reg, MemOperand(sp, i * kDoubleSize)); + } + __ addi(sp, sp, Operand(kDoubleSize * kNumRegs)); + } + __ MultiPop(kJSCallerSaved | r0.bit()); + __ mtlr(r0); + __ Ret(); +} + + +void UnaryOpStub::PrintName(StringStream* stream) { + const char* op_name = Token::Name(op_); + const char* overwrite_name = NULL; // Make g++ happy. + switch (mode_) { + case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break; + case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break; + } + stream->Add("UnaryOpStub_%s_%s_%s", + op_name, + overwrite_name, + UnaryOpIC::GetName(operand_type_)); +} + + +// TODO(svenpanne): Use virtual functions instead of switch. +void UnaryOpStub::Generate(MacroAssembler* masm) { + switch (operand_type_) { + case UnaryOpIC::UNINITIALIZED: + GenerateTypeTransition(masm); + break; + case UnaryOpIC::SMI: + GenerateSmiStub(masm); + break; + case UnaryOpIC::HEAP_NUMBER: + GenerateHeapNumberStub(masm); + break; + case UnaryOpIC::GENERIC: + GenerateGenericStub(masm); + break; + } +} + + +void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { + __ mr(r6, r3); // the operand + __ LoadSmiLiteral(r5, Smi::FromInt(op_)); + __ LoadSmiLiteral(r4, Smi::FromInt(mode_)); + __ LoadSmiLiteral(r3, Smi::FromInt(operand_type_)); + __ Push(r6, r5, r4, r3); + + __ TailCallExternalReference( + ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1); +} + + +// TODO(svenpanne): Use virtual functions instead of switch. 
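+// (Overview note.) The helpers below implement the unary-op IC states that +// UnaryOpStub::Generate above dispatches on: a stub specialized for SMI or +// HEAP_NUMBER operands handles its fast case inline and, when it sees any +// other input, calls GenerateTypeTransition so the IC can patch in a more +// general stub, with the GENERIC stub ultimately falling back to the +// JavaScript builtins.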
+void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) { + switch (op_) { + case Token::SUB: + GenerateSmiStubSub(masm); + break; + case Token::BIT_NOT: + GenerateSmiStubBitNot(masm); + break; + default: + UNREACHABLE(); + } +} + + +void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) { + Label non_smi, slow; + GenerateSmiCodeSub(masm, &non_smi, &slow); + __ bind(&non_smi); + __ bind(&slow); + GenerateTypeTransition(masm); +} + + +void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) { + Label non_smi; + GenerateSmiCodeBitNot(masm, &non_smi); + __ bind(&non_smi); + GenerateTypeTransition(masm); +} + + +void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm, + Label* non_smi, + Label* slow) { + __ JumpIfNotSmi(r3, non_smi); + + // The result of negating zero or the smallest negative smi is not a smi. + __ TestBitRange(r3, kBitsPerPointer - 2, 0, r0); + __ beq(slow, cr0); + + // Return '- value'. + __ neg(r3, r3); + __ Ret(); +} + + +void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm, + Label* non_smi) { + __ JumpIfNotSmi(r3, non_smi); + + // Flip bits and revert inverted smi-tag. + ASSERT(kSmiTagMask == 1); + __ notx(r3, r3); + __ ClearRightImm(r3, r3, Operand(kSmiTagSize + kSmiShiftSize)); + __ Ret(); +} + + +// TODO(svenpanne): Use virtual functions instead of switch. +void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { + switch (op_) { + case Token::SUB: + GenerateHeapNumberStubSub(masm); + break; + case Token::BIT_NOT: + GenerateHeapNumberStubBitNot(masm); + break; + default: + UNREACHABLE(); + } +} + + +void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) { + Label non_smi, slow, call_builtin; + GenerateSmiCodeSub(masm, &non_smi, &call_builtin); + __ bind(&non_smi); + GenerateHeapNumberCodeSub(masm, &slow); + __ bind(&slow); + GenerateTypeTransition(masm); + __ bind(&call_builtin); + GenerateGenericCodeFallback(masm); +} + + +void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) { + Label non_smi, slow; + GenerateSmiCodeBitNot(masm, &non_smi); + __ bind(&non_smi); + GenerateHeapNumberCodeBitNot(masm, &slow); + __ bind(&slow); + GenerateTypeTransition(masm); +} + +void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm, + Label* slow) { + EmitCheckForHeapNumber(masm, r3, r4, r9, slow); + // r3 is a heap number. Get a new heap number in r4. + if (mode_ == UNARY_OVERWRITE) { + __ lwz(r5, FieldMemOperand(r3, HeapNumber::kExponentOffset)); + __ xoris(r5, r5, Operand(HeapNumber::kSignMask >> 16)); // Flip sign. 
+ __ stw(r5, FieldMemOperand(r3, HeapNumber::kExponentOffset)); + } else { + Label slow_allocate_heapnumber, heapnumber_allocated; + __ AllocateHeapNumber(r4, r5, r6, r9, &slow_allocate_heapnumber); + __ b(&heapnumber_allocated); + + __ bind(&slow_allocate_heapnumber); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(r3); + __ CallRuntime(Runtime::kNumberAlloc, 0); + __ mr(r4, r3); + __ pop(r3); + } + + __ bind(&heapnumber_allocated); + __ lwz(r6, FieldMemOperand(r3, HeapNumber::kMantissaOffset)); + __ lwz(r5, FieldMemOperand(r3, HeapNumber::kExponentOffset)); + __ stw(r6, FieldMemOperand(r4, HeapNumber::kMantissaOffset)); + __ mov(r0, Operand(HeapNumber::kSignMask)); + __ xor_(r5, r5, r0); + __ stw(r5, FieldMemOperand(r4, HeapNumber::kExponentOffset)); + __ mr(r3, r4); + } + __ Ret(); +} + + +void UnaryOpStub::GenerateHeapNumberCodeBitNot( + MacroAssembler* masm, Label* slow) { + Label impossible; + + EmitCheckForHeapNumber(masm, r3, r4, r9, slow); + // Convert the heap number in r3 to an untagged integer in r4. + __ ConvertToInt32(r3, r4, r5, r6, d0, slow); + + // Do the bitwise operation and check if the result fits in a smi. + __ notx(r4, r4); + +#if !V8_TARGET_ARCH_PPC64 + Label try_float; + __ JumpIfNotSmiCandidate(r4, r5, &try_float); +#endif + + // Tag the result as a smi and we're done. + __ SmiTag(r3, r4); + __ Ret(); + +#if !V8_TARGET_ARCH_PPC64 + // Try to store the result in a heap number. + __ bind(&try_float); + if (mode_ == UNARY_NO_OVERWRITE) { + Label slow_allocate_heapnumber, heapnumber_allocated; + // Allocate a new heap number without zapping r0, which we need if it fails. + __ AllocateHeapNumber(r5, r6, r7, r9, &slow_allocate_heapnumber); + __ b(&heapnumber_allocated); + + __ bind(&slow_allocate_heapnumber); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(r3); // Push the heap number, not the untagged int32. + __ CallRuntime(Runtime::kNumberAlloc, 0); + __ mr(r5, r3); // Move the new heap number into r5. + // Get the heap number into r3, now that the new heap number is in r5. + __ pop(r3); + } + + // Convert the heap number in r3 to an untagged integer in r4. + // This can't go slow-case because it's the same number we already + // converted once again. + __ ConvertToInt32(r3, r4, r6, r7, d0, &impossible); + __ notx(r4, r4); + + __ bind(&heapnumber_allocated); + __ mr(r3, r5); // Move newly allocated heap number to r0. + } + + // Convert the int32 in r4 to the heap number in r3. + FloatingPointHelper::ConvertIntToDouble( + masm, r4, d0); + __ stfd(d0, FieldMemOperand(r3, HeapNumber::kValueOffset)); + __ Ret(); + + __ bind(&impossible); + if (FLAG_debug_code) { + __ stop("Incorrect assumption in bit-not stub"); + } +#endif +} + + +// TODO(svenpanne): Use virtual functions instead of switch. 
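+// (Illustrative sketch, not part of the generated code.) The negation above +// works purely on the IEEE-754 bit pattern: toggling the sign bit in the word +// at HeapNumber::kExponentOffset negates the value without FP arithmetic, +// roughly: +// uint32_t hi = exponent_word(num); // hypothetical accessor, for +// hi ^= HeapNumber::kSignMask; // illustration only: flip bit 31 +// set_exponent_word(num, hi); +// which is why the UNARY_OVERWRITE path rewrites a single 32-bit word while +// the allocating path copies the mantissa word unchanged and stores the +// flipped exponent word into the fresh heap number.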
+void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) { + switch (op_) { + case Token::SUB: + GenerateGenericStubSub(masm); + break; + case Token::BIT_NOT: + GenerateGenericStubBitNot(masm); + break; + default: + UNREACHABLE(); + } +} + + +void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) { + Label non_smi, slow; + GenerateSmiCodeSub(masm, &non_smi, &slow); + __ bind(&non_smi); + GenerateHeapNumberCodeSub(masm, &slow); + __ bind(&slow); + GenerateGenericCodeFallback(masm); +} + + +void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) { + Label non_smi, slow; + GenerateSmiCodeBitNot(masm, &non_smi); + __ bind(&non_smi); + GenerateHeapNumberCodeBitNot(masm, &slow); + __ bind(&slow); + GenerateGenericCodeFallback(masm); +} + + +void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) { + // Handle the slow case by jumping to the JavaScript builtin. + __ push(r3); + switch (op_) { + case Token::SUB: + __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION); + break; + case Token::BIT_NOT: + __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); + break; + default: + UNREACHABLE(); + } +} + + +void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { + Label get_result; + + __ Push(r4, r3); + + __ LoadSmiLiteral(r5, Smi::FromInt(MinorKey())); + __ LoadSmiLiteral(r4, Smi::FromInt(op_)); + __ LoadSmiLiteral(r3, Smi::FromInt(operands_type_)); + __ Push(r5, r4, r3); + + __ TailCallExternalReference( + ExternalReference(IC_Utility(IC::kBinaryOp_Patch), + masm->isolate()), + 5, + 1); +} + + +void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( + MacroAssembler* masm) { + UNIMPLEMENTED(); +} + + +void BinaryOpStub::Generate(MacroAssembler* masm) { + // Explicitly allow generation of nested stubs. It is safe here because + // generation code does not use any raw pointers. + AllowStubCallsScope allow_stub_calls(masm, true); + + switch (operands_type_) { + case BinaryOpIC::UNINITIALIZED: + GenerateTypeTransition(masm); + break; + case BinaryOpIC::SMI: + GenerateSmiStub(masm); + break; + case BinaryOpIC::INT32: + GenerateInt32Stub(masm); + break; + case BinaryOpIC::HEAP_NUMBER: + GenerateHeapNumberStub(masm); + break; + case BinaryOpIC::ODDBALL: + GenerateOddballStub(masm); + break; + case BinaryOpIC::BOTH_STRING: + GenerateBothStringStub(masm); + break; + case BinaryOpIC::STRING: + GenerateStringStub(masm); + break; + case BinaryOpIC::GENERIC: + GenerateGeneric(masm); + break; + default: + UNREACHABLE(); + } +} + + +void BinaryOpStub::PrintName(StringStream* stream) { + const char* op_name = Token::Name(op_); + const char* overwrite_name; + switch (mode_) { + case NO_OVERWRITE: overwrite_name = "Alloc"; break; + case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; + case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; + default: overwrite_name = "UnknownOverwrite"; break; + } + stream->Add("BinaryOpStub_%s_%s_%s", + op_name, + overwrite_name, + BinaryOpIC::GetName(operands_type_)); +} + + +void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) { + Register left = r4; + Register right = r3; + Register scratch1 = r10; + Register scratch2 = r22; + + ASSERT(right.is(r3)); + STATIC_ASSERT(kSmiTag == 0); + + Label not_smi_result; + switch (op_) { + case Token::ADD: { + Label undo_add, add_no_overflow; + // C = A+B; C overflows if A/B have same sign and C has diff sign than A + __ xor_(r0, left, right); + __ mr(scratch1, right); + __ add(right, left, right); // Add optimistically. 
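+ // (Explanatory note.) At this point r0 holds left ^ right: its sign bit is + // set exactly when the operands have different signs, a case that cannot + // overflow, so the first test below branches straight to the return. + // Otherwise the second xor compares the sign of the optimistic sum with + // the sign of the saved operand; a mismatch means the addition wrapped + // and is undone.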
+ __ TestSignBit(r0, r0); + __ bne(&add_no_overflow, cr0); + __ xor_(r0, right, scratch1); + __ TestSignBit(r0, r0); + __ bne(&undo_add, cr0); + __ bind(&add_no_overflow); + __ Ret(); + __ bind(&undo_add); + __ mr(right, scratch1); // Revert optimistic add. + break; + } + case Token::SUB: { + Label undo_sub, sub_no_overflow; + // C = A-B; C overflows if A/B have diff signs and C has diff sign than A + __ xor_(r0, left, right); + __ mr(scratch1, right); + __ sub(right, left, right); // Subtract optimistically. + __ TestSignBit(r0, r0); + __ beq(&sub_no_overflow, cr0); + __ xor_(r0, right, left); + __ TestSignBit(r0, r0); + __ bne(&undo_sub, cr0); + __ bind(&sub_no_overflow); + __ Ret(); + __ bind(&undo_sub); + __ mr(right, scratch1); // Revert optimistic subtract. + break; + } + case Token::MUL: { + Label mul_zero, mul_neg_zero; +#if V8_TARGET_ARCH_PPC64 + // Remove tag from both operands. + __ SmiUntag(ip, right); + __ SmiUntag(r0, left); + // Do multiplication + // scratch1 = product (untagged) + // scratch2 = sign-extended higher 32 bits of product. + __ Mul(scratch1, r0, ip); + // Check for overflowing the smi range - no overflow if higher 33 bits of + // the result are identical. + __ TestIfInt32(scratch1, scratch2, ip); + __ bne(¬_smi_result); +#else + // Remove tag from one of the operands. This way the multiplication result + // will be a smi if it fits the smi range. + __ SmiUntag(ip, right); + // Do multiplication + // scratch1 = lower 32 bits of product. + // scratch2 = higher 32 bits of product. + __ mullw(scratch1, left, ip); + __ mulhw(scratch2, left, ip); + // Check for overflowing the smi range - no overflow if higher 33 bits of + // the result are identical. + __ TestIfInt32(scratch2, scratch1, ip); + __ bne(¬_smi_result); +#endif + // Go slow on zero result to handle -0. + __ cmpi(scratch1, Operand::Zero()); + __ beq(&mul_zero); +#if V8_TARGET_ARCH_PPC64 + __ SmiTag(right, scratch1); +#else + __ mr(right, scratch1); +#endif + __ Ret(); + __ bind(&mul_zero); + // We need -0 if we were multiplying a negative number with 0 to get 0. + // We know one of them was zero. + __ add(scratch2, right, left); + __ cmpi(scratch2, Operand::Zero()); + __ blt(&mul_neg_zero); + __ LoadSmiLiteral(right, Smi::FromInt(0)); + __ Ret(); // Return smi 0 if the non-zero one was positive. + __ bind(&mul_neg_zero); + // We fall through here if we multiplied a negative number with 0, because + // that would mean we should produce -0. + break; + } + case Token::DIV: { + Label check_neg_zero; + __ SmiUntag(ip, left); + __ SmiUntag(scratch2, right, SetRC); + __ Div(scratch1, ip, scratch2); + // Check for zero on the right hand side. + __ beq(¬_smi_result, cr0); + // Not Smi if remainder is non-zero. + __ Mul(scratch2, scratch2, scratch1); + __ cmp(ip, scratch2); + __ bne(¬_smi_result); + // If the result is 0, we need to check for the -0 case. + __ SmiTag(scratch2, scratch1, SetRC); + __ beq(&check_neg_zero, cr0); + // Check for Smi overflow + __ xor_(scratch1, scratch2, scratch1, SetRC); + __ blt(¬_smi_result, cr0); + __ mr(right, scratch2); + __ Ret(); + + // If divisor (right) is negative, we must produce -0. + __ bind(&check_neg_zero); + __ cmpi(right, Operand::Zero()); + __ blt(¬_smi_result); + __ mr(right, scratch2); + __ Ret(); + break; + } + case Token::MOD: { + Label check_neg_zero; + __ SmiUntag(ip, left); + __ SmiUntag(scratch2, right, SetRC); + __ Div(scratch1, ip, scratch2); + // Check for zero on the right hand side. 
+ __ beq(¬_smi_result, cr0); + __ Mul(scratch1, scratch2, scratch1); + __ sub(scratch1, ip, scratch1, LeaveOE, SetRC); + // If the result is 0, we need to check for the -0 case. + __ beq(&check_neg_zero, cr0); +#if !V8_TARGET_ARCH_PPC64 + // Check that the signed result fits in a Smi. + __ JumpIfNotSmiCandidate(scratch1, scratch2, ¬_smi_result); +#endif + __ SmiTag(right, scratch1); + __ Ret(); + + // If dividend (left) is negative, we must produce -0. + __ bind(&check_neg_zero); + __ cmpi(left, Operand::Zero()); + __ blt(¬_smi_result); + __ LoadSmiLiteral(right, Smi::FromInt(0)); + __ Ret(); + break; + } + case Token::BIT_OR: + __ orx(right, left, right); + __ Ret(); + break; + case Token::BIT_AND: + __ and_(right, left, right); + __ Ret(); + break; + case Token::BIT_XOR: + __ xor_(right, left, right); + __ Ret(); + break; + case Token::SAR: + // Remove tags from right operand. + __ GetLeastBitsFromSmi(scratch1, right, 5); + __ ShiftRightArith(right, left, scratch1); + // Smi tag result. + __ ClearRightImm(right, right, Operand(kSmiTagSize + kSmiShiftSize)); + __ Ret(); + break; + case Token::SHR: + // Remove tags from operands. We can't do this on a 31 bit number + // because then the 0s get shifted into bit 30 instead of bit 31. + __ SmiUntag(scratch1, left); + __ GetLeastBitsFromSmi(scratch2, right, 5); + __ srw(scratch1, scratch1, scratch2); + // Unsigned shift is not allowed to produce a negative number. + __ JumpIfNotUnsignedSmiCandidate(scratch1, r0, ¬_smi_result); + // Smi tag result. + __ SmiTag(right, scratch1); + __ Ret(); + break; + case Token::SHL: + // Remove tags from operands. + __ SmiUntag(scratch1, left); + __ GetLeastBitsFromSmi(scratch2, right, 5); + __ ShiftLeft(scratch1, scratch1, scratch2); +#if !V8_TARGET_ARCH_PPC64 + // Check that the signed result fits in a Smi. + __ JumpIfNotSmiCandidate(scratch1, scratch2, ¬_smi_result); +#endif + __ SmiTag(right, scratch1); + __ Ret(); + break; + default: + UNREACHABLE(); + } + __ bind(¬_smi_result); +} + + +void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, + bool smi_operands, + Label* not_numbers, + Label* gc_required) { + Register left = r4; + Register right = r3; + Register scratch1 = r10; + Register scratch2 = r22; + Register scratch3 = r7; + + ASSERT(smi_operands || (not_numbers != NULL)); + if (smi_operands) { + __ AssertSmi(left); + __ AssertSmi(right); + } + + Register heap_number_map = r9; + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + + switch (op_) { + case Token::ADD: + case Token::SUB: + case Token::MUL: + case Token::DIV: + case Token::MOD: { + // Load left and right operands into d1 and d2 + // Allocate new heap number for result. + Register result = r8; + GenerateHeapResultAllocation( + masm, result, heap_number_map, scratch1, scratch2, gc_required); + + // Load the operands. + if (smi_operands) { + FloatingPointHelper::LoadSmis(masm, scratch1, scratch2); + } else { + FloatingPointHelper::LoadOperands(masm, + heap_number_map, + scratch1, + scratch2, + not_numbers); + } + + // Calculate the result. + // Using FP registers: + // d1: Left value + // d2: Right value + switch (op_) { + case Token::ADD: + __ fadd(d1, d1, d2); + break; + case Token::SUB: + __ fsub(d1, d1, d2); + break; + case Token::MUL: + __ fmul(d1, d1, d2); + break; + case Token::DIV: + __ fdiv(d1, d1, d2); + break; + case Token::MOD: + // Call the C function to handle the double operation. 
+ FloatingPointHelper::CallCCodeForDoubleOperation(masm, + op_, + result, + scratch1); + if (FLAG_debug_code) { + __ stop("Unreachable code."); + } + break; + default: + UNREACHABLE(); + } + __ stfd(d1, FieldMemOperand(result, HeapNumber::kValueOffset)); + __ mr(r3, result); + __ Ret(); + break; + } + case Token::BIT_OR: + case Token::BIT_XOR: + case Token::BIT_AND: + case Token::SAR: + case Token::SHR: + case Token::SHL: { + if (smi_operands) { + __ SmiUntag(r6, left); + __ SmiUntag(r5, right); + } else { + // Convert operands to 32-bit integers. Right in r5 and left in r6. + FloatingPointHelper::ConvertNumberToInt32(masm, + left, + r6, + heap_number_map, + scratch1, + scratch2, + scratch3, + d0, + not_numbers); + FloatingPointHelper::ConvertNumberToInt32(masm, + right, + r5, + heap_number_map, + scratch1, + scratch2, + scratch3, + d0, + not_numbers); + } + + Label result_not_a_smi; + switch (op_) { + case Token::BIT_OR: + __ orx(r5, r6, r5); + break; + case Token::BIT_XOR: + __ xor_(r5, r6, r5); + break; + case Token::BIT_AND: + __ and_(r5, r6, r5); + break; + case Token::SAR: + // Use only the 5 least significant bits of the shift count. + __ GetLeastBitsFromInt32(r5, r5, 5); + __ sraw(r5, r6, r5); + break; + case Token::SHR: + { + // Use only the 5 least significant bits of the shift count. + __ GetLeastBitsFromInt32(r5, r5, 5); + // SHR is special because it is required to produce a positive answer. + // The code below for writing into heap numbers isn't capable of + // writing the register as an unsigned int so we go to slow case if we + // hit this case. +#if V8_TARGET_ARCH_PPC64 + const Condition cond = ne; + __ srw(r5, r6, r5); + __ TestSignBit32(r5, r0); +#else + const Condition cond = lt; + __ srw(r5, r6, r5, SetRC); +#endif + __ b(cond, &result_not_a_smi, cr0); + break; + } + case Token::SHL: + // Use only the 5 least significant bits of the shift count. + __ GetLeastBitsFromInt32(r5, r5, 5); + __ ShiftLeft(r5, r6, r5); + break; + default: + UNREACHABLE(); + } + +#if !V8_TARGET_ARCH_PPC64 + // Check that the *signed* result fits in a smi. + __ JumpIfNotSmiCandidate(r5, r6, &result_not_a_smi); +#endif + __ SmiTag(r3, r5); + __ Ret(); + + // Allocate new heap number for result. + __ bind(&result_not_a_smi); + Register result = r8; + if (smi_operands) { + __ AllocateHeapNumber( + result, scratch1, scratch2, heap_number_map, gc_required); + } else { + GenerateHeapResultAllocation( + masm, result, heap_number_map, scratch1, scratch2, gc_required); + } + + // r5: Answer as signed int32. + // r8: Heap number to write answer into. + + // Nothing can go wrong now, so move the heap number to r3, which is the + // result. + __ mr(r3, r8); + + // Convert the int32 in r5 to the heap number in r3. As + // mentioned above SHR needs to always produce a positive result. + if (op_ == Token::SHR) { + FloatingPointHelper::ConvertUnsignedIntToDouble( + masm, r5, d0); + } else { + FloatingPointHelper::ConvertIntToDouble( + masm, r5, d0); + } + __ stfd(d0, FieldMemOperand(r3, HeapNumber::kValueOffset)); + __ Ret(); + break; + } + default: + UNREACHABLE(); + } +} + + +// Generate the smi code. If the operation on smis is successful, this return is +// generated. If the result is not a smi and heap number allocation is not +// requested, the code falls through. If number allocation is requested but a +// heap number cannot be allocated, the code jumps to the label gc_required.
+void BinaryOpStub::GenerateSmiCode( + MacroAssembler* masm, + Label* use_runtime, + Label* gc_required, + SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { + Label not_smis; + + Register left = r4; + Register right = r3; + Register scratch1 = r10; + + // Perform combined smi check on both operands. + __ orx(scratch1, left, right); + STATIC_ASSERT(kSmiTag == 0); + __ JumpIfNotSmi(scratch1, ¬_smis); + + // If the smi-smi operation results in a smi return is generated. + GenerateSmiSmiOperation(masm); + + // If heap number results are possible generate the result in an allocated + // heap number. + if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) { + GenerateFPOperation(masm, true, use_runtime, gc_required); + } + __ bind(¬_smis); +} + + +void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { + Label not_smis, call_runtime; + + if (result_type_ == BinaryOpIC::UNINITIALIZED || + result_type_ == BinaryOpIC::SMI) { + // Only allow smi results. + GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS); + } else { + // Allow heap number result and don't make a transition if a heap number + // cannot be allocated. + GenerateSmiCode(masm, + &call_runtime, + &call_runtime, + ALLOW_HEAPNUMBER_RESULTS); + } + + // Code falls through if the result is not returned as either a smi or heap + // number. + GenerateTypeTransition(masm); + + __ bind(&call_runtime); + GenerateCallRuntime(masm); +} + + +void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) { + ASSERT(operands_type_ == BinaryOpIC::STRING); + ASSERT(op_ == Token::ADD); + // Try to add arguments as strings, otherwise, transition to the generic + // BinaryOpIC type. + GenerateAddStrings(masm); + GenerateTypeTransition(masm); +} + + +void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { + Label call_runtime; + ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING); + ASSERT(op_ == Token::ADD); + // If both arguments are strings, call the string add stub. + // Otherwise, do a transition. + + // Registers containing left and right operands respectively. + Register left = r4; + Register right = r3; + + // Test if left operand is a string. + __ JumpIfSmi(left, &call_runtime); + __ CompareObjectType(left, r5, r5, FIRST_NONSTRING_TYPE); + __ bge(&call_runtime); + + // Test if right operand is a string. + __ JumpIfSmi(right, &call_runtime); + __ CompareObjectType(right, r5, r5, FIRST_NONSTRING_TYPE); + __ bge(&call_runtime); + + StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); + GenerateRegisterArgsPush(masm); + __ TailCallStub(&string_add_stub); + + __ bind(&call_runtime); + GenerateTypeTransition(masm); +} + + +void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { + ASSERT(operands_type_ == BinaryOpIC::INT32); + + Register left = r4; + Register right = r3; + Register scratch1 = r10; + Register scratch2 = r11; + DwVfpRegister double_scratch0 = d0; + DwVfpRegister double_scratch1 = d1; + + Register heap_number_result = no_reg; + Register heap_number_map = r9; + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + + Label call_runtime; + // Labels for type transition, used for wrong input or output types. + // Both label are currently actually bound to the same position. We use two + // different label to differentiate the cause leading to type transition. + Label transition; + + // Smi-smi fast case. + Label skip; + __ orx(scratch1, left, right); + __ JumpIfNotSmi(scratch1, &skip); + GenerateSmiSmiOperation(masm); + // Fall through if the result is not a smi. 
+ __ bind(&skip); + + switch (op_) { + case Token::ADD: + case Token::SUB: + case Token::MUL: + case Token::DIV: + case Token::MOD: { + // Load both operands and check that they are 32-bit integer. + // Jump to type transition if they are not. The registers r3 and r4 (right + // and left) are preserved for the runtime call. + FloatingPointHelper::LoadNumberAsInt32Double(masm, + right, + d2, + d8, + heap_number_map, + scratch1, + scratch2, + &transition); + FloatingPointHelper::LoadNumberAsInt32Double(masm, + left, + d1, + d8, + heap_number_map, + scratch1, + scratch2, + &transition); + + Label return_heap_number; + switch (op_) { + case Token::ADD: + __ fadd(d1, d1, d2); + break; + case Token::SUB: + __ fsub(d1, d1, d2); + break; + case Token::MUL: + __ fmul(d1, d1, d2); + break; + case Token::DIV: + __ fdiv(d1, d1, d2); + break; + case Token::MOD: { + Label pop_and_call_runtime; + + // Allocate a heap number to store the result. + heap_number_result = r8; + GenerateHeapResultAllocation(masm, + heap_number_result, + heap_number_map, + scratch1, + scratch2, + &pop_and_call_runtime); + + // Call the C function to handle the double operation. + FloatingPointHelper::CallCCodeForDoubleOperation( + masm, op_, heap_number_result, scratch1); + if (FLAG_debug_code) { + __ stop("Unreachable code."); + } + + __ bind(&pop_and_call_runtime); + __ b(&call_runtime); + break; + } + default: + UNREACHABLE(); + } + + if (op_ != Token::DIV) { + // These operations produce an integer result. + // Try to return a smi if we can. + // Otherwise return a heap number if allowed, or jump to type + // transition. + + __ EmitVFPTruncate(kRoundToZero, + scratch1, + d1, + scratch2, + d8); + + // result does not fit in a 32-bit integer. + Label *not_int32 = ((result_type_ <= BinaryOpIC::INT32) ? + &transition : &return_heap_number); + __ bne(not_int32); + +#if !V8_TARGET_ARCH_PPC64 + // Check if the result fits in a smi. + // If not try to return a heap number. + __ JumpIfNotSmiCandidate(scratch1, scratch2, &return_heap_number); +#endif + // Check for minus zero. Return heap number for minus zero. + Label not_zero; + __ cmpi(scratch1, Operand::Zero()); + __ bne(¬_zero); + + __ subi(sp, sp, Operand(8)); + __ stfd(d1, MemOperand(sp, 0)); +#if V8_TARGET_ARCH_PPC64 + __ ld(scratch2, MemOperand(sp, 0)); +#else +#if __FLOAT_WORD_ORDER == __LITTLE_ENDIAN + __ lwz(scratch2, MemOperand(sp, 4)); +#else + __ lwz(scratch2, MemOperand(sp, 0)); +#endif +#endif + __ addi(sp, sp, Operand(8)); + + __ TestSignBit(scratch2, r0); + __ bne(&return_heap_number, cr0); + __ bind(¬_zero); + + // Tag the result and return. + __ SmiTag(r3, scratch1); + __ Ret(); + } else { + // DIV just falls through to allocating a heap number. + } + + __ bind(&return_heap_number); + // Return a heap number, or fall through to type transition or runtime + // call if we can't. + if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER + : BinaryOpIC::INT32)) { + heap_number_result = r8; + GenerateHeapResultAllocation(masm, + heap_number_result, + heap_number_map, + scratch1, + scratch2, + &call_runtime); + __ stfd(d1, FieldMemOperand(heap_number_result, + HeapNumber::kValueOffset)); + __ mr(r3, heap_number_result); + __ Ret(); + } + + // A DIV operation expecting an integer result falls through + // to type transition. + break; + } + + case Token::BIT_OR: + case Token::BIT_XOR: + case Token::BIT_AND: + case Token::SAR: + case Token::SHR: + case Token::SHL: { + Label return_heap_number; + Register scratch3 = r8; + // Convert operands to 32-bit integers. 
Right in r5 and left in r6. The + // registers r3 and r4 (right and left) are preserved for the runtime + // call. + FloatingPointHelper::LoadNumberAsInt32(masm, + left, + r6, + heap_number_map, + scratch1, + scratch2, + scratch3, + double_scratch0, + double_scratch1, + &transition); + FloatingPointHelper::LoadNumberAsInt32(masm, + right, + r5, + heap_number_map, + scratch1, + scratch2, + scratch3, + double_scratch0, + double_scratch1, + &transition); + + // The ECMA-262 standard specifies that, for shift operations, only the + // 5 least significant bits of the shift value should be used. + switch (op_) { + case Token::BIT_OR: + __ orx(r5, r6, r5); + break; + case Token::BIT_XOR: + __ xor_(r5, r6, r5); + break; + case Token::BIT_AND: + __ and_(r5, r6, r5); + break; + case Token::SAR: + __ GetLeastBitsFromInt32(r5, r5, 5); + __ sraw(r5, r6, r5); + break; + case Token::SHR: + { + __ GetLeastBitsFromInt32(r5, r5, 5); + // SHR is special because it is required to produce a positive answer. + // We only get a negative result if the shift value (r5) is 0. + // This result cannot be respresented as a signed 32-bit integer, try + // to return a heap number if we can. +#if V8_TARGET_ARCH_PPC64 + const Condition cond = ne; + __ srw(r5, r6, r5); + __ TestSignBit32(r5, r0); +#else + const Condition cond = lt; + __ srw(r5, r6, r5, SetRC); +#endif + __ b(cond, ((result_type_ <= BinaryOpIC::INT32) + ? &transition + : &return_heap_number), cr0); + break; + } + case Token::SHL: + __ andi(r5, r5, Operand(0x1f)); + __ ShiftLeft(r5, r6, r5); + break; + default: + UNREACHABLE(); + } + +#if !V8_TARGET_ARCH_PPC64 + // Check if the result fits in a smi. + // If not try to return a heap number. (We know the result is an int32.) + __ JumpIfNotSmiCandidate(r5, scratch1, &return_heap_number); +#endif + // Tag the result and return. + __ SmiTag(r3, r5); + __ Ret(); + + __ bind(&return_heap_number); + heap_number_result = r8; + GenerateHeapResultAllocation(masm, + heap_number_result, + heap_number_map, + scratch1, + scratch2, + &call_runtime); + + if (op_ != Token::SHR) { + // Convert the result to a floating point value. + FloatingPointHelper::ConvertIntToDouble(masm, r5, double_scratch0); + } else { + // The result must be interpreted as an unsigned 32-bit integer. + FloatingPointHelper::ConvertUnsignedIntToDouble(masm, r5, + double_scratch0); + } + + // Store the result. + __ stfd(double_scratch0, FieldMemOperand(heap_number_result, + HeapNumber::kValueOffset)); + __ mr(r3, heap_number_result); + __ Ret(); + + break; + } + + default: + UNREACHABLE(); + } + + // We never expect DIV to yield an integer result, so we always generate + // type transition code for DIV operations expecting an integer result: the + // code will fall through to this type transition. + if (transition.is_linked() || + ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) { + __ bind(&transition); + GenerateTypeTransition(masm); + } + + __ bind(&call_runtime); + GenerateCallRuntime(masm); +} + + +void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { + Label call_runtime; + + if (op_ == Token::ADD) { + // Handle string addition here, because it is the only operation + // that does not do a ToNumber conversion on the operands. + GenerateAddStrings(masm); + } + + // Convert oddball arguments to numbers. 
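+ // (Clarifying note.) ToNumber(undefined) is NaN, but the bitwise operators + // go through ToInt32, where NaN becomes 0; the checks below therefore + // substitute Smi 0 for undefined under bit ops and the NaN constant + // otherwise, then defer to the heap-number stub.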
+ Label check, done; + __ CompareRoot(r4, Heap::kUndefinedValueRootIndex); + __ bne(&check); + if (Token::IsBitOp(op_)) { + __ LoadSmiLiteral(r4, Smi::FromInt(0)); + } else { + __ LoadRoot(r4, Heap::kNanValueRootIndex); + } + __ b(&done); + __ bind(&check); + __ CompareRoot(r3, Heap::kUndefinedValueRootIndex); + __ bne(&done); + if (Token::IsBitOp(op_)) { + __ LoadSmiLiteral(r3, Smi::FromInt(0)); + } else { + __ LoadRoot(r3, Heap::kNanValueRootIndex); + } + __ bind(&done); + + GenerateHeapNumberStub(masm); +} + + +void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { + Label call_runtime; + GenerateFPOperation(masm, false, &call_runtime, &call_runtime); + + __ bind(&call_runtime); + GenerateCallRuntime(masm); +} + + +void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { + Label call_runtime, call_string_add_or_runtime; + + GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); + + GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime); + + __ bind(&call_string_add_or_runtime); + if (op_ == Token::ADD) { + GenerateAddStrings(masm); + } + + __ bind(&call_runtime); + GenerateCallRuntime(masm); +} + + +void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { + ASSERT(op_ == Token::ADD); + Label left_not_string, call_runtime; + + Register left = r4; + Register right = r3; + + // Check if left argument is a string. + __ JumpIfSmi(left, &left_not_string); + __ CompareObjectType(left, r5, r5, FIRST_NONSTRING_TYPE); + __ bge(&left_not_string); + + StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB); + GenerateRegisterArgsPush(masm); + __ TailCallStub(&string_add_left_stub); + + // Left operand is not a string, test right. + __ bind(&left_not_string); + __ JumpIfSmi(right, &call_runtime); + __ CompareObjectType(right, r5, r5, FIRST_NONSTRING_TYPE); + __ bge(&call_runtime); + + StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB); + GenerateRegisterArgsPush(masm); + __ TailCallStub(&string_add_right_stub); + + // At least one argument is not a string. + __ bind(&call_runtime); +} + + +void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) { + GenerateRegisterArgsPush(masm); + switch (op_) { + case Token::ADD: + __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); + break; + case Token::SUB: + __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); + break; + case Token::MUL: + __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); + break; + case Token::DIV: + __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); + break; + case Token::MOD: + __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); + break; + case Token::BIT_OR: + __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); + break; + case Token::BIT_AND: + __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); + break; + case Token::BIT_XOR: + __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); + break; + case Token::SAR: + __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); + break; + case Token::SHR: + __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); + break; + case Token::SHL: + __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); + break; + default: + UNREACHABLE(); + } +} + + +void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm, + Register result, + Register heap_number_map, + Register scratch1, + Register scratch2, + Label* gc_required) { + // Code below will scratch result if allocation fails. To keep both arguments + // intact for the runtime call result cannot be one of these. 
+ ASSERT(!result.is(r3) && !result.is(r4)); + + if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) { + Label skip_allocation, allocated; + Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r4 : r3; + // If the overwritable operand is already an object, we skip the + // allocation of a heap number. + __ JumpIfNotSmi(overwritable_operand, &skip_allocation); + // Allocate a heap number for the result. + __ AllocateHeapNumber( + result, scratch1, scratch2, heap_number_map, gc_required); + __ b(&allocated); + __ bind(&skip_allocation); + // Use object holding the overwritable operand for result. + __ mr(result, overwritable_operand); + __ bind(&allocated); + } else { + ASSERT(mode_ == NO_OVERWRITE); + __ AllocateHeapNumber( + result, scratch1, scratch2, heap_number_map, gc_required); + } +} + + +void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { + __ Push(r4, r3); +} + + +void TranscendentalCacheStub::Generate(MacroAssembler* masm) { + // Untagged case: double input in d2, double result goes + // into d2. + // Tagged case: tagged input on top of stack and in r3, + // tagged result (heap number) goes into r3. + + Label input_not_smi; + Label loaded; + Label calculate; + Label invalid_cache; + const Register scratch0 = r22; + const Register scratch1 = r10; + const Register cache_entry = r3; + const bool tagged = (argument_type_ == TAGGED); + + if (tagged) { + // Argument is a number and is on stack and in r3. + // Load argument and check if it is a smi. + __ JumpIfNotSmi(r3, &input_not_smi); + + // Input is a smi. Convert to double and load the low and high words + // of the double into r5, r6. + __ SmiToDoubleFPRegister(r3, d6, scratch0); + __ subi(sp, sp, Operand(8)); + __ stfd(d6, MemOperand(sp, 0)); +#if __FLOAT_WORD_ORDER == __LITTLE_ENDIAN + __ lwz(r5, MemOperand(sp)); + __ lwz(r6, MemOperand(sp, 4)); +#else + __ lwz(r5, MemOperand(sp, 4)); + __ lwz(r6, MemOperand(sp)); +#endif + __ addi(sp, sp, Operand(8)); + __ b(&loaded); + + __ bind(&input_not_smi); + // Check if input is a HeapNumber. + __ CheckMap(r3, + r4, + Heap::kHeapNumberMapRootIndex, + &calculate, + DONT_DO_SMI_CHECK); + // Input is a HeapNumber. Load it to a double register and store the + // low and high words into r5, r6. + __ lwz(r6, FieldMemOperand(r3, HeapNumber::kExponentOffset)); + __ lwz(r5, FieldMemOperand(r3, HeapNumber::kMantissaOffset)); + } else { + // Input is untagged double in d2. Output goes to d2. + __ subi(sp, sp, Operand(8)); + __ stfd(d2, MemOperand(sp, 0)); +#if __FLOAT_WORD_ORDER == __LITTLE_ENDIAN + __ lwz(r5, MemOperand(sp, 4)); + __ lwz(r6, MemOperand(sp)); +#else + __ lwz(r5, MemOperand(sp)); + __ lwz(r6, MemOperand(sp, 4)); +#endif + __ addi(sp, sp, Operand(8)); + } + __ bind(&loaded); + // r5 = low 32 bits of double value + // r6 = high 32 bits of double value + // Compute hash (the shifts are arithmetic): + // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); + __ xor_(r4, r5, r6); + __ srawi(scratch0, r4, 16); + __ xor_(r4, r4, scratch0); + __ srawi(scratch0, r4, 8); + __ xor_(r4, r4, scratch0); + ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize)); + __ andi(r4, r4, Operand(TranscendentalCache::SubCache::kCacheSize - 1)); + + // r5 = low 32 bits of double value. + // r6 = high 32 bits of double value. + // r4 = TranscendentalCache::hash(double value). 
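+ // (Illustrative note, assuming the element layout verified by the DEBUG + // block below.) Each SubCache element behaves like + // struct Element { uint32_t in[2]; Object* output; }; + // i.e. 12 bytes on 32-bit targets and 16 bytes on 64-bit ones, which is + // why the entry index is scaled by 12 or 16 when the entry address is + // computed further down.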
+ Isolate* isolate = masm->isolate(); + ExternalReference cache_array = + ExternalReference::transcendental_cache_array_address(isolate); + __ mov(cache_entry, Operand(cache_array)); + // cache_entry points to cache array. + int cache_array_index + = type_ * sizeof(isolate->transcendental_cache()->caches_[0]); + __ LoadP(cache_entry, MemOperand(cache_entry, cache_array_index), r0); + // r3 points to the cache for the type type_. + // If NULL, the cache hasn't been initialized yet, so go through runtime. + __ cmpi(cache_entry, Operand(0, RelocInfo::NONE)); + __ beq(&invalid_cache); + +#ifdef DEBUG + // Check that the layout of cache elements matches expectations. + { TranscendentalCache::SubCache::Element test_elem[2]; + char* elem_start = reinterpret_cast<char*>(&test_elem[0]); + char* elem2_start = reinterpret_cast<char*>(&test_elem[1]); + char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0])); + char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1])); + char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output)); + // Two uint32_t's and a pointer. +#if V8_TARGET_ARCH_PPC64 + CHECK_EQ(16, static_cast<int>(elem2_start - elem_start)); +#else + CHECK_EQ(12, static_cast<int>(elem2_start - elem_start)); +#endif + CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start)); + CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start)); + CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start)); + } +#endif + +#if V8_TARGET_ARCH_PPC64 + // Find the address of the r4'th entry in the cache, i.e., &r3[r4*16]. + __ ShiftLeftImm(scratch0, r4, Operand(4)); +#else + // Find the address of the r4'th entry in the cache, i.e., &r3[r4*12]. + __ ShiftLeftImm(scratch0, r4, Operand(1)); + __ add(r4, r4, scratch0); + __ ShiftLeftImm(scratch0, r4, Operand(2)); +#endif + __ add(cache_entry, cache_entry, scratch0); + // Check if cache matches: Double value is stored in uint32_t[2] array. + __ lwz(r7, MemOperand(cache_entry, 0)); + __ lwz(r8, MemOperand(cache_entry, 4)); + __ LoadP(r9, MemOperand(cache_entry, 8)); + __ cmp(r5, r7); + __ bne(&calculate); + __ cmp(r6, r8); + __ bne(&calculate); + // Cache hit. Load result, cleanup and return. + Counters* counters = masm->isolate()->counters(); + __ IncrementCounter( + counters->transcendental_cache_hit(), 1, scratch0, scratch1); + if (tagged) { + // Pop input value from stack and load result into r3. + __ pop(); + __ mr(r3, r9); + } else { + // Load result into d2. + __ lfd(d2, FieldMemOperand(r9, HeapNumber::kValueOffset)); + } + __ Ret(); + + __ bind(&calculate); + __ IncrementCounter( + counters->transcendental_cache_miss(), 1, scratch0, scratch1); + if (tagged) { + __ bind(&invalid_cache); + ExternalReference runtime_function = + ExternalReference(RuntimeFunction(), masm->isolate()); + __ TailCallExternalReference(runtime_function, 1, 1); + } else { + Label no_update; + Label skip_cache; + + // Call C function to calculate the result and update the cache. + // r3: precalculated cache entry address. + // r5 and r6: parts of the double value. + // Store r3, r5 and r6 on stack for later before calling C function. + __ Push(r6, r5, cache_entry); + GenerateCallCFunction(masm, scratch0); + __ GetCFunctionDoubleResult(d2); + + // Try to update the cache. If we cannot allocate a + // heap number, we return the result without updating.
+ __ Pop(r6, r5, cache_entry); + __ LoadRoot(r8, Heap::kHeapNumberMapRootIndex); + __ AllocateHeapNumber(r9, scratch0, scratch1, r8, &no_update); + __ stfd(d2, FieldMemOperand(r9, HeapNumber::kValueOffset)); + __ stw(r5, MemOperand(cache_entry, 0)); + __ stw(r6, MemOperand(cache_entry, 4)); + __ StoreP(r9, MemOperand(cache_entry, 8)); + __ Ret(); + + __ bind(&invalid_cache); + // The cache is invalid. Call runtime which will recreate the + // cache. + __ LoadRoot(r8, Heap::kHeapNumberMapRootIndex); + __ AllocateHeapNumber(r3, scratch0, scratch1, r8, &skip_cache); + __ stfd(d2, FieldMemOperand(r3, HeapNumber::kValueOffset)); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(r3); + __ CallRuntime(RuntimeFunction(), 1); + } + __ lfd(d2, FieldMemOperand(r3, HeapNumber::kValueOffset)); + __ Ret(); + + __ bind(&skip_cache); + // Call C function to calculate the result and answer directly + // without updating the cache. + GenerateCallCFunction(masm, scratch0); + __ GetCFunctionDoubleResult(d2); + __ bind(&no_update); + + // We return the value in d2 without adding it to the cache, but + // we cause a scavenging GC so that future allocations will succeed. + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Allocate an aligned object larger than a HeapNumber. + ASSERT(2 * kDoubleSize >= HeapNumber::kSize); + __ LoadSmiLiteral(scratch0, Smi::FromInt(2 * kDoubleSize)); + __ push(scratch0); + __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); + } + __ Ret(); + } +} + + +void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, + Register scratch) { + Isolate* isolate = masm->isolate(); + + __ mflr(r0); + __ push(r0); + __ PrepareCallCFunction(0, 1, scratch); + __ fmr(d1, d2); + AllowExternalCallThatCantCauseGC scope(masm); + switch (type_) { + case TranscendentalCache::SIN: + __ CallCFunction(ExternalReference::math_sin_double_function(isolate), + 0, 1); + break; + case TranscendentalCache::COS: + __ CallCFunction(ExternalReference::math_cos_double_function(isolate), + 0, 1); + break; + case TranscendentalCache::TAN: + __ CallCFunction(ExternalReference::math_tan_double_function(isolate), + 0, 1); + break; + case TranscendentalCache::LOG: + __ CallCFunction(ExternalReference::math_log_double_function(isolate), + 0, 1); + break; + default: + UNIMPLEMENTED(); + break; + } + __ pop(r0); + __ mtlr(r0); +} + + +Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() { + switch (type_) { + // Add more cases when necessary. 
+ case TranscendentalCache::SIN: return Runtime::kMath_sin;
+ case TranscendentalCache::COS: return Runtime::kMath_cos;
+ case TranscendentalCache::TAN: return Runtime::kMath_tan;
+ case TranscendentalCache::LOG: return Runtime::kMath_log;
+ default:
+ UNIMPLEMENTED();
+ return Runtime::kAbort;
+ }
+}
+
+
+void StackCheckStub::Generate(MacroAssembler* masm) {
+ __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
+}
+
+
+void InterruptStub::Generate(MacroAssembler* masm) {
+ __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
+}
+
+
+void MathPowStub::Generate(MacroAssembler* masm) {
+ const Register base = r4;
+ const Register exponent = r5;
+ const Register heapnumbermap = r8;
+ const Register heapnumber = r3;
+ const DoubleRegister double_base = d1;
+ const DoubleRegister double_exponent = d2;
+ const DoubleRegister double_result = d3;
+ const DoubleRegister double_scratch = d0;
+ const Register scratch = r22;
+ const Register scratch2 = r10;
+
+ Label call_runtime, done, int_exponent;
+ if (exponent_type_ == ON_STACK) {
+ Label base_is_smi, unpack_exponent;
+ // The exponent and base are supplied as arguments on the stack.
+ // This can only happen if the stub is called from non-optimized code.
+ // Load input parameters from stack to double registers.
+ __ LoadP(base, MemOperand(sp, 1 * kPointerSize));
+ __ LoadP(exponent, MemOperand(sp, 0 * kPointerSize));
+
+ __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
+
+ __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
+ __ LoadP(scratch, FieldMemOperand(base, JSObject::kMapOffset));
+ __ cmp(scratch, heapnumbermap);
+ __ bne(&call_runtime);
+
+ __ lfd(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
+ __ b(&unpack_exponent);
+
+ __ bind(&base_is_smi);
+ FloatingPointHelper::ConvertIntToDouble(masm, scratch, double_base);
+ __ bind(&unpack_exponent);
+
+ __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
+ __ LoadP(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
+ __ cmp(scratch, heapnumbermap);
+ __ bne(&call_runtime);
+
+ __ lfd(double_exponent,
+ FieldMemOperand(exponent, HeapNumber::kValueOffset));
+ } else if (exponent_type_ == TAGGED) {
+ // Base is already in double_base.
+ __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
+
+ __ lfd(double_exponent,
+ FieldMemOperand(exponent, HeapNumber::kValueOffset));
+ }
+
+ if (exponent_type_ != INTEGER) {
+ // Detect integer exponents stored as double.
+ __ EmitVFPTruncate(kRoundToZero,
+ scratch,
+ double_exponent,
+ scratch2,
+ double_scratch,
+ kCheckForInexactConversion);
+ __ beq(&int_exponent);
+
+ if (exponent_type_ == ON_STACK) {
+ // Detect square root case. Crankshaft detects constant +/-0.5 at
+ // compile time and uses DoMathPowHalf instead. We then skip this check
+ // for non-constant cases of +/-0.5 as these hardly occur.
+ Label not_plus_half, not_minus_inf1, not_minus_inf2;
+
+ // Test for 0.5.
+ __ LoadDoubleLiteral(double_scratch, 0.5, scratch);
+ __ fcmpu(double_exponent, double_scratch);
+ __ bne(&not_plus_half);
+
+ // Calculates square root of base. Check for the special case of
+ // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
+ __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch);
+ __ fcmpu(double_base, double_scratch);
+ __ bne(&not_minus_inf1);
+ __ fneg(double_result, double_scratch);
+ __ b(&done);
+ __ bind(&not_minus_inf1);
+
+ // Add +0 to convert -0 to +0.
+ __ fadd(double_scratch, double_base, kDoubleRegZero);
+ __ fsqrt(double_result, double_scratch);
+ __ b(&done);
+
+ __ bind(&not_plus_half);
+ __ LoadDoubleLiteral(double_scratch, -0.5, scratch);
+ __ fcmpu(double_exponent, double_scratch);
+ __ bne(&call_runtime);
+
+ // Calculates square root of base. Check for the special case of
+ // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
+ __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch);
+ __ fcmpu(double_base, double_scratch);
+ __ bne(&not_minus_inf2);
+ __ fmr(double_result, kDoubleRegZero);
+ __ b(&done);
+ __ bind(&not_minus_inf2);
+
+ // Add +0 to convert -0 to +0.
+ __ fadd(double_scratch, double_base, kDoubleRegZero);
+ __ LoadDoubleLiteral(double_result, 1.0, scratch);
+ __ fsqrt(double_scratch, double_scratch);
+ __ fdiv(double_result, double_result, double_scratch);
+ __ b(&done);
+ }
+
+ __ mflr(r0);
+ __ push(r0);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ SetCallCDoubleArguments(double_base, double_exponent);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()),
+ 0, 2);
+ }
+ __ pop(r0);
+ __ mtlr(r0);
+ __ GetCFunctionDoubleResult(double_result);
+ __ b(&done);
+ }
+
+ // Calculate power with integer exponent.
+ __ bind(&int_exponent);
+
+ // Get two copies of exponent in the registers scratch and exponent.
+ if (exponent_type_ == INTEGER) {
+ __ mr(scratch, exponent);
+ } else {
+ // Exponent has previously been stored into scratch as untagged integer.
+ __ mr(exponent, scratch);
+ }
+ __ fmr(double_scratch, double_base); // Back up base.
+ __ li(scratch2, Operand(1));
+ FloatingPointHelper::ConvertIntToDouble(masm, scratch2, double_result);
+
+ // Get absolute value of exponent.
+ Label positive_exponent;
+ __ cmpi(scratch, Operand::Zero());
+ __ bge(&positive_exponent);
+ __ neg(scratch, scratch);
+ __ bind(&positive_exponent);
+
+ Label while_true, no_carry, loop_end;
+ __ bind(&while_true);
+ __ andi(scratch2, scratch, Operand(1));
+ __ beq(&no_carry, cr0);
+ __ fmul(double_result, double_result, double_scratch);
+ __ bind(&no_carry);
+ __ ShiftRightArithImm(scratch, scratch, 1, SetRC);
+ __ beq(&loop_end, cr0);
+ __ fmul(double_scratch, double_scratch, double_scratch);
+ __ b(&while_true);
+ __ bind(&loop_end);
+
+ __ cmpi(exponent, Operand::Zero());
+ __ bge(&done);
+
+ __ li(scratch2, Operand(1));
+ FloatingPointHelper::ConvertIntToDouble(masm, scratch2, double_scratch);
+ __ fdiv(double_result, double_scratch, double_result);
+ // Test whether result is zero. Bail out to check for subnormal result.
+ // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
+ __ fcmpu(double_result, kDoubleRegZero);
+ __ bne(&done);
+ // double_exponent may not contain the exponent value if the input was a
+ // smi. We set it with exponent value before bailing out.
+ FloatingPointHelper::ConvertIntToDouble(masm, exponent, double_exponent);
+
+ // Returning or bailing out.
+ Counters* counters = masm->isolate()->counters();
+ if (exponent_type_ == ON_STACK) {
+ // The arguments are still on the stack.
+ __ bind(&call_runtime);
+ __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+
+ // The stub is called from non-optimized code, which expects the result
+ // as heap number in exponent.
+ __ bind(&done);
+ __ AllocateHeapNumber(
+ heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
+ __ stfd(double_result,
+ FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+ ASSERT(heapnumber.is(r3));
+ __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
+ __ Ret(2);
+ } else {
+ __ mflr(r0);
+ __ push(r0);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ SetCallCDoubleArguments(double_base, double_exponent);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()),
+ 0, 2);
+ }
+ __ pop(r0);
+ __ mtlr(r0);
+ __ GetCFunctionDoubleResult(double_result);
+
+ __ bind(&done);
+ __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
+ __ Ret();
+ }
+}
+
+
+bool CEntryStub::NeedsImmovableCode() {
+ return true;
+}
+
+
+bool CEntryStub::IsPregenerated() {
+ return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
+ result_size_ == 1;
+}
+
+
+void CodeStub::GenerateStubsAheadOfTime() {
+ CEntryStub::GenerateAheadOfTime();
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
+ RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+}
+
+
+void CodeStub::GenerateFPStubs() {
+ CEntryStub save_doubles(1, kSaveFPRegs);
+ Handle<Code> code = save_doubles.GetCode();
+ code->set_is_pregenerated(true);
+ StoreBufferOverflowStub stub(kSaveFPRegs);
+ stub.GetCode()->set_is_pregenerated(true);
+ code->GetIsolate()->set_fp_stubs_generated(true);
+}
+
+
+void CEntryStub::GenerateAheadOfTime() {
+ CEntryStub stub(1, kDontSaveFPRegs);
+ Handle<Code> code = stub.GetCode();
+ code->set_is_pregenerated(true);
+}
+
+
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+ Label* throw_normal_exception,
+ Label* throw_termination_exception,
+ Label* throw_out_of_memory_exception,
+ bool do_gc,
+ bool always_allocate) {
+ // r3: result parameter for PerformGC, if any
+ // r14: number of arguments including receiver (C callee-saved)
+ // r15: pointer to builtin function (C callee-saved)
+ // r16: pointer to the first argument (C callee-saved)
+ Isolate* isolate = masm->isolate();
+ Register isolate_reg = no_reg;
+
+ if (do_gc) {
+ // Passing r3.
+ __ PrepareCallCFunction(1, 0, r4);
+ __ CallCFunction(ExternalReference::perform_gc_function(isolate),
+ 1, 0);
+ }
+
+ ExternalReference scope_depth =
+ ExternalReference::heap_always_allocate_scope_depth(isolate);
+ if (always_allocate) {
+ __ mov(r3, Operand(scope_depth));
+ __ lwz(r4, MemOperand(r3));
+ __ addi(r4, r4, Operand(1));
+ __ stw(r4, MemOperand(r3));
+ }
+
+ // PPC LINUX ABI:
+ // The #if below used to be !USE_SIMULATOR but needed
+ // to change to support nativesim=true builds
+#if defined(V8_HOST_ARCH_PPC64) || defined(V8_HOST_ARCH_PPC)
+ // Call C built-in on native hardware.
+#if defined(V8_TARGET_ARCH_PPC64)
+
+#if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
+ if (result_size_ < 2) {
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ __ mr(r3, r14);
+#else
+ // r3 = argc << 32 (for alignment), r4 = argv
+ __ ShiftLeftImm(r3, r14, Operand(32));
+#endif
+ __ mr(r4, r16);
+ isolate_reg = r5;
+ } else {
+ ASSERT_EQ(2, result_size_);
+ // The return value is a 16-byte non-scalar value.
+ // Use frame storage reserved by calling function to pass return
+ // buffer as implicit first argument.
+ __ addi(r3, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize)); +#if __BYTE_ORDER == __LITTLE_ENDIAN + __ mr(r4, r14); +#else + // r4 = argc << 32 (for alignment), r5 = argv + __ ShiftLeftImm(r4, r14, Operand(32)); +#endif + __ mr(r5, r16); + isolate_reg = r6; + } +#else +#if __BYTE_ORDER == __LITTLE_ENDIAN + __ mr(r3, r14); +#else + // r3 = argc << 32 (for alignment), r4 = argv + __ ShiftLeftImm(r3, r14, Operand(32)); +#endif + __ mr(r4, r16); + isolate_reg = r5; +#endif + +#elif defined(_AIX) // 32-bit AIX + // r3 = argc, r4 = argv + __ mr(r3, r14); + __ mr(r4, r16); + isolate_reg = r5; +#else // 32-bit linux + // Use frame storage reserved by calling function + // PPC passes C++ objects by reference not value + // This builds an object in the stack frame + __ addi(r3, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize)); + __ StoreP(r14, MemOperand(r3)); + __ StoreP(r16, MemOperand(r3, kPointerSize)); + isolate_reg = r4; +#endif +#else // Simulated + // Call C built-in using simulator. + // r3 = argc, r4 = argv +#if defined(V8_TARGET_ARCH_PPC64) && __BYTE_ORDER == __BIG_ENDIAN + __ ShiftLeftImm(r3, r14, Operand(32)); +#else + __ mr(r3, r14); +#endif + __ mr(r4, r16); + isolate_reg = r5; +#endif + + __ mov(isolate_reg, Operand(ExternalReference::isolate_address())); + +#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR) + // Native AIX/PPC64 Linux use a function descriptor. + __ LoadP(ToRegister(2), MemOperand(r15, kPointerSize)); // TOC + __ LoadP(ip, MemOperand(r15, 0)); // Instruction address + Register target = ip; +#elif ABI_TOC_ADDRESSABILITY_VIA_IP + Register target = ip; + __ Move(ip, r15); +#else + Register target = r15; +#endif + + // To let the GC traverse the return address of the exit frames, we need to + // know where the return address is. The CEntryStub is unmovable, so + // we can store the address on the stack to be able to find it again and + // we never have to restore it, because it will not change. + // Compute the return address in lr to return to after the jump below. Pc is + // already at '+ 8' from the current instruction but return is after three + // instructions so add another 4 to pc to get the return address. + { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm); + Label here; + __ b(&here, SetLK); + __ bind(&here); + __ mflr(r8); + +// Constant used below is dependent on size of Call() macro instructions + __ addi(r0, r8, Operand(20)); + + __ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize)); + __ Call(target); + } + + if (always_allocate) { + // It's okay to clobber r5 and r6 here. Don't mess with r3 and r4 + // though (contain the result). + __ mov(r5, Operand(scope_depth)); + __ lwz(r6, MemOperand(r5)); + __ subi(r6, r6, Operand(1)); + __ stw(r6, MemOperand(r5)); + } + + // check for failure result + Label failure_returned; + STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); +#if defined(V8_TARGET_ARCH_PPC64) && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS + // If return value is on the stack, pop it to registers. + if (result_size_ > 1) { + ASSERT_EQ(2, result_size_); + __ LoadP(r4, MemOperand(r3, kPointerSize)); + __ LoadP(r3, MemOperand(r3)); + } +#endif + // Lower 2 bits of r5 are 0 iff r3 has failure tag. + __ addi(r5, r3, Operand(1)); + STATIC_ASSERT(kFailureTagMask < 0x8000); + __ andi(r0, r5, Operand(kFailureTagMask)); + __ beq(&failure_returned, cr0); + + // Exit C frame and return. 
+ // r3:r4: result
+ // sp: stack pointer
+ // fp: frame pointer
+ // Callee-saved register r14 still holds argc.
+ __ LeaveExitFrame(save_doubles_, r14);
+ __ blr();
+
+ // check if we should retry or throw exception
+ Label retry;
+ __ bind(&failure_returned);
+ STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
+ __ andi(r0, r3, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
+ __ beq(&retry, cr0);
+
+ // Special handling of out of memory exceptions.
+ Failure* out_of_memory = Failure::OutOfMemoryException();
+ __ cmpi(r3, Operand(reinterpret_cast<intptr_t>(out_of_memory)));
+ __ beq(throw_out_of_memory_exception);
+
+ // Retrieve the pending exception and clear the variable.
+ __ mov(r6, Operand(isolate->factory()->the_hole_value()));
+ __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate)));
+ __ LoadP(r3, MemOperand(ip));
+ __ StoreP(r6, MemOperand(ip));
+
+ // Special handling of termination exceptions which are uncatchable
+ // by javascript code.
+ __ mov(r6, Operand(isolate->factory()->termination_exception()));
+ __ cmp(r3, r6);
+ __ beq(throw_termination_exception);
+
+ // Handle normal exception.
+ __ b(throw_normal_exception);
+
+ __ bind(&retry); // pass last failure (r3) as parameter (r3) when retrying
+}
+
+
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // Called from JavaScript; parameters are on stack as if calling JS function
+ // r3: number of arguments including receiver
+ // r4: pointer to builtin function
+ // fp: frame pointer (restored after C call)
+ // sp: stack pointer (restored as callee's sp after C call)
+ // cp: current context (C callee-saved)
+
+ // Result returned in r3 or r3+r4 by default.
+
+ // NOTE: Invocations of builtins may return failure objects
+ // instead of a proper result. The builtin entry handles
+ // this by performing a garbage collection and retrying the
+ // builtin once.
+
+ // Compute the argv pointer in a callee-saved register.
+ __ ShiftLeftImm(r16, r3, Operand(kPointerSizeLog2));
+ __ add(r16, r16, sp);
+ __ subi(r16, r16, Operand(kPointerSize));
+
+ // Enter the exit frame that transitions from JavaScript to C++.
+ FrameScope scope(masm, StackFrame::MANUAL);
+
+ // Need at least one extra slot for return address location.
+ int arg_stack_space = 1;
+
+ // PPC LINUX ABI:
+ // The #if immediately below was !USE_SIMULATOR, but needed
+ // to change to support nativesim=true builds
+#if defined(V8_HOST_ARCH_PPC64) || defined(V8_HOST_ARCH_PPC)
+#if defined(V8_TARGET_ARCH_PPC64) && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
+ // Pass buffer for return value on stack if necessary
+ if (result_size_ > 1) {
+ ASSERT_EQ(2, result_size_);
+ arg_stack_space += 2;
+ }
+#elif !defined(_AIX)
+ // 32-bit linux
+ // Pass C++ objects by reference not value
+ arg_stack_space += 2;
+#endif
+#endif
+
+ __ EnterExitFrame(save_doubles_, arg_stack_space);
+
+ // Set up argc and the builtin function in callee-saved registers.
+ __ mr(r14, r3);
+ __ mr(r15, r4);
+
+ // r14: number of arguments (C callee-saved)
+ // r15: pointer to builtin function (C callee-saved)
+ // r16: pointer to first argument (C callee-saved)
+
+ Label throw_normal_exception;
+ Label throw_termination_exception;
+ Label throw_out_of_memory_exception;
+
+ // Call into the runtime system.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ false,
+ false);
+
+ // Do space-specific GC and retry runtime call.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ true,
+ false);
+
+ // Do full GC and retry runtime call one final time.
+ Failure* failure = Failure::InternalError();
+ __ mov(r3, Operand(reinterpret_cast<intptr_t>(failure)));
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ true,
+ true);
+
+ __ bind(&throw_out_of_memory_exception);
+ // Set external caught exception to false.
+ Isolate* isolate = masm->isolate();
+ ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
+ isolate);
+ __ li(r3, Operand(false, RelocInfo::NONE));
+ __ mov(r5, Operand(external_caught));
+ __ StoreP(r3, MemOperand(r5));
+
+ // Set pending exception and r3 to out of memory exception.
+ Failure* out_of_memory = Failure::OutOfMemoryException();
+ __ mov(r3, Operand(reinterpret_cast<intptr_t>(out_of_memory)));
+ __ mov(r5, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate)));
+ __ StoreP(r3, MemOperand(r5));
+ // Fall through to the next label.
+
+ __ bind(&throw_termination_exception);
+ __ ThrowUncatchable(r3);
+
+ __ bind(&throw_normal_exception);
+ __ Throw(r3);
+}
+
+
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+ // r3: code entry
+ // r4: function
+ // r5: receiver
+ // r6: argc
+ // [sp+0]: argv
+
+ Label invoke, handler_entry, exit;
+
+ // Called from C
+#if ABI_USES_FUNCTION_DESCRIPTORS
+ __ function_descriptor();
+#endif
+
+ // PPC LINUX ABI:
+ // preserve LR in pre-reserved slot in caller's frame
+ __ mflr(r0);
+ __ StoreP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize));
+
+ // Save callee saved registers on the stack.
+ __ MultiPush(kCalleeSaved);
+
+ // Floating point regs FPR0 - FPR13 are volatile
+ // FPR14-FPR31 are non-volatile, but sub-calls will save them for us
+
+// int offset_to_argv = kPointerSize * 22; // matches (22*4) above
+// __ lwz(r7, MemOperand(sp, offset_to_argv));
+
+ // Push a frame with special values setup to mark it as an entry frame.
+ // r3: code entry
+ // r4: function
+ // r5: receiver
+ // r6: argc
+ // r7: argv
+ Isolate* isolate = masm->isolate();
+ __ li(r0, Operand(-1)); // Push a bad frame pointer to fail if it is used.
+ __ push(r0);
+ int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ __ LoadSmiLiteral(r0, Smi::FromInt(marker));
+ __ push(r0);
+ __ push(r0);
+ // Save copies of the top frame descriptor on the stack.
+ __ mov(r8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
+ __ LoadP(r0, MemOperand(r8));
+ __ push(r0);
+
+ // Set up frame pointer for the frame to be pushed.
+ __ addi(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
+
+ // If this is the outermost JS call, set js_entry_sp value.
+ Label non_outermost_js;
+ ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
+ __ mov(r8, Operand(ExternalReference(js_entry_sp)));
+ __ LoadP(r9, MemOperand(r8));
+ __ cmpi(r9, Operand::Zero());
+ __ bne(&non_outermost_js);
+ __ StoreP(fp, MemOperand(r8));
+ __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ Label cont;
+ __ b(&cont);
+ __ bind(&non_outermost_js);
+ __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
+ __ bind(&cont);
+ __ push(ip); // frame-type
+
+ // Jump to a faked try block that does the invoke, with a faked catch
+ // block that sets the pending exception.
+ __ b(&invoke);
+
+ __ bind(&handler_entry);
+ handler_offset_ = handler_entry.pos();
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel. Coming in here the
+ // fp will be invalid because the PushTryHandler below sets it to 0 to
+ // signal the existence of the JSEntry frame.
+ __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate)));
+
+ __ StoreP(r3, MemOperand(ip));
+ __ mov(r3, Operand(reinterpret_cast<intptr_t>(Failure::Exception())));
+ __ b(&exit);
+
+ // Invoke: Link this frame into the handler chain. There's only one
+ // handler block in this code object, so its index is 0.
+ __ bind(&invoke);
+ // Must preserve r0-r4, r5-r7 are available. (needs update for PPC)
+ __ PushTryHandler(StackHandler::JS_ENTRY, 0);
+ // If an exception not caught by another handler occurs, this handler
+ // returns control to the code after the b(&invoke) above, which
+ // restores all kCalleeSaved registers (including cp and fp) to their
+ // saved values before returning a failure to C.
+
+ // Clear any pending exceptions.
+ __ mov(r8, Operand(isolate->factory()->the_hole_value()));
+ __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate)));
+ __ StoreP(r8, MemOperand(ip));
+
+ // Invoke the function by calling through JS entry trampoline builtin.
+ // Notice that we cannot store a reference to the trampoline code directly in
+ // this stub, because runtime stubs are not traversed when doing GC.
+
+ // Expected registers by Builtins::JSEntryTrampoline
+ // r3: code entry
+ // r4: function
+ // r5: receiver
+ // r6: argc
+ // r7: argv
+ if (is_construct) {
+ ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
+ isolate);
+ __ mov(ip, Operand(construct_entry));
+ } else {
+ ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
+ __ mov(ip, Operand(entry));
+ }
+ __ LoadP(ip, MemOperand(ip)); // deref address
+
+ // Branch and link to JSEntryTrampoline.
+ // the address points to the start of the code object, skip the header
+ __ addi(r0, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ mtlr(r0);
+ __ bclr(BA, SetLK); // make the call
+
+ // Unlink this frame from the handler chain.
+ __ PopTryHandler();
+
+ __ bind(&exit); // r3 holds result
+ // Check if the current stack frame is marked as the outermost JS frame.
+ Label non_outermost_js_2;
+ __ pop(r8);
+ __ CmpSmiLiteral(r8, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME), r0);
+ __ bne(&non_outermost_js_2);
+ __ mov(r9, Operand::Zero());
+ __ mov(r8, Operand(ExternalReference(js_entry_sp)));
+ __ StoreP(r9, MemOperand(r8));
+ __ bind(&non_outermost_js_2);
+
+ // Restore the top frame descriptors from the stack.
+ __ pop(r6);
+ __ mov(ip,
+ Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
+ __ StoreP(r6, MemOperand(ip));
+
+ // Reset the stack to the callee saved registers.
+ __ addi(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
+
+ // Restore callee-saved registers and return.
+#ifdef DEBUG
+ if (FLAG_debug_code) {
+ Label here;
+ __ b(&here, SetLK);
+ __ bind(&here);
+ }
+#endif
+
+ __ MultiPop(kCalleeSaved);
+
+ __ LoadP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize));
+ __ mtctr(r0);
+ __ bcr();
+}
+
+
+// Uses registers r3 to r7.
+// Expected input (depending on whether args are in registers or on the stack):
+// * object: r3 or at sp + 1 * kPointerSize.
+// * function: r4 or at sp.
+//
+// An inlined call site may have been generated before calling this stub.
+// In this case the offset to the inline site to patch is passed on the stack,
+// in the safepoint slot for register r7.
+// (See LCodeGen::DoInstanceOfKnownGlobal)
+void InstanceofStub::Generate(MacroAssembler* masm) {
+ // Call site inlining and patching implies arguments in registers.
+ ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
+ // ReturnTrueFalse is only implemented for inlined call sites.
+ ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
+
+ // Fixed register usage throughout the stub:
+ const Register object = r3; // Object (lhs).
+ Register map = r6; // Map of the object.
+ const Register function = r4; // Function (rhs).
+ const Register prototype = r7; // Prototype of the function.
+ const Register inline_site = r9;
+ const Register scratch = r5;
+ const Register scratch2 = r8;
+ Register scratch3 = no_reg;
+
+#if V8_TARGET_ARCH_PPC64
+ const int32_t kDeltaToLoadBoolResult = 9 * Assembler::kInstrSize;
+#else
+ const int32_t kDeltaToLoadBoolResult = 5 * Assembler::kInstrSize;
+#endif
+
+ Label slow, loop, is_instance, is_not_instance, not_js_object;
+
+ if (!HasArgsInRegisters()) {
+ __ LoadP(object, MemOperand(sp, 1 * kPointerSize));
+ __ LoadP(function, MemOperand(sp, 0));
+ }
+
+ // Check that the left hand is a JS object and load map.
+ __ JumpIfSmi(object, &not_js_object);
+ __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
+
+ // If there is a call site cache don't look in the global cache, but do the
+ // real lookup and update the call site cache.
+ if (!HasCallSiteInlineCheck()) {
+ Label miss;
+ __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ bne(&miss);
+ __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
+ __ bne(&miss);
+ __ LoadRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
+ __ Ret(HasArgsInRegisters() ? 0 : 2);
+
+ __ bind(&miss);
+ }
+
+ // Get the prototype of the function.
+ __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
+
+ // Check that the function prototype is a JS object.
+ __ JumpIfSmi(prototype, &slow);
+ __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
+
+ // Update the global instanceof or call site inlined cache with the current
+ // map and function. The cached answer will be set when it is known below.
+ if (!HasCallSiteInlineCheck()) {
+ __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
+ } else {
+ ASSERT(HasArgsInRegisters());
+ // Patch the (relocated) inlined map check.
+
+ // The offset was stored in r7 safepoint slot.
+ // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
+ __ LoadFromSafepointRegisterSlot(scratch, r7);
+ __ mflr(inline_site);
+ __ sub(inline_site, inline_site, scratch);
+ // Get the map location in scratch and patch it.
+ __ GetRelocatedValueLocation(inline_site, scratch, scratch2);
+ __ StoreP(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset),
+ r0);
+ }
+
+ // Register mapping: r6 is object map and r7 is function prototype.
+ // Get prototype of object into r5.
+ __ LoadP(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
+
+ // We don't need map any more. Use it as a scratch register.
+ scratch3 = map;
+ map = no_reg;
+
+ // Loop through the prototype chain looking for the function prototype.
+ __ LoadRoot(scratch3, Heap::kNullValueRootIndex);
+ __ bind(&loop);
+ __ cmp(scratch, prototype);
+ __ beq(&is_instance);
+ __ cmp(scratch, scratch3);
+ __ beq(&is_not_instance);
+ __ LoadP(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
+ __ LoadP(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
+ __ b(&loop);
+
+ __ bind(&is_instance);
+ if (!HasCallSiteInlineCheck()) {
+ __ LoadSmiLiteral(r3, Smi::FromInt(0));
+ __ StoreRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
+ } else {
+ // Patch the call site to return true.
+ __ LoadRoot(r3, Heap::kTrueValueRootIndex);
+ __ addi(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+ // Get the boolean result location in scratch and patch it.
+ __ PatchRelocatedValue(inline_site, scratch, r3);
+
+ if (!ReturnTrueFalseObject()) {
+ __ LoadSmiLiteral(r3, Smi::FromInt(0));
+ }
+ }
+ __ Ret(HasArgsInRegisters() ? 0 : 2);
+
+ __ bind(&is_not_instance);
+ if (!HasCallSiteInlineCheck()) {
+ __ LoadSmiLiteral(r3, Smi::FromInt(1));
+ __ StoreRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
+ } else {
+ // Patch the call site to return false.
+ __ LoadRoot(r3, Heap::kFalseValueRootIndex);
+ __ addi(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+ // Get the boolean result location in scratch and patch it.
+ __ PatchRelocatedValue(inline_site, scratch, r3);
+
+ if (!ReturnTrueFalseObject()) {
+ __ LoadSmiLiteral(r3, Smi::FromInt(1));
+ }
+ }
+ __ Ret(HasArgsInRegisters() ? 0 : 2);
+
+ Label object_not_null, object_not_null_or_smi;
+ __ bind(&not_js_object);
+ // Before null, smi and string value checks, check that the rhs is a function
+ // as for a non-function rhs an exception needs to be thrown.
+ __ JumpIfSmi(function, &slow);
+ __ CompareObjectType(function, scratch3, scratch, JS_FUNCTION_TYPE);
+ __ bne(&slow);
+
+ // Null is not instance of anything.
+ __ Cmpi(scratch, Operand(masm->isolate()->factory()->null_value()), r0);
+ __ bne(&object_not_null);
+ __ LoadSmiLiteral(r3, Smi::FromInt(1));
+ __ Ret(HasArgsInRegisters() ? 0 : 2);
+
+ __ bind(&object_not_null);
+ // Smi values are not instances of anything.
+ __ JumpIfNotSmi(object, &object_not_null_or_smi);
+ __ LoadSmiLiteral(r3, Smi::FromInt(1));
+ __ Ret(HasArgsInRegisters() ? 0 : 2);
+
+ __ bind(&object_not_null_or_smi);
+ // String values are not instances of anything.
+ __ IsObjectJSStringType(object, scratch, &slow);
+ __ LoadSmiLiteral(r3, Smi::FromInt(1));
+ __ Ret(HasArgsInRegisters() ? 0 : 2);
+
+ // Slow-case. Tail call builtin.
+ __ bind(&slow);
+ if (!ReturnTrueFalseObject()) {
+ if (HasArgsInRegisters()) {
+ __ Push(r3, r4);
+ }
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
+ } else {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r3, r4);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+ }
+ Label true_value, done;
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&true_value);
+
+ __ LoadRoot(r3, Heap::kFalseValueRootIndex);
+ __ b(&done);
+
+ __ bind(&true_value);
+ __ LoadRoot(r3, Heap::kTrueValueRootIndex);
+
+ __ bind(&done);
+ __ Ret(HasArgsInRegisters() ? 0 : 2);
+ }
+}
+
+
+Register InstanceofStub::left() { return r3; }
+
+
+Register InstanceofStub::right() { return r4; }
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ // The displacement is the offset of the last parameter (if any)
+ // relative to the frame pointer.
+ const int kDisplacement =
+ StandardFrameConstants::kCallerSPOffset - kPointerSize;
+
+ // Check that the key is a smi.
+ Label slow; + __ JumpIfNotSmi(r4, &slow); + + // Check if the calling frame is an arguments adaptor frame. + Label adaptor; + __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset)); + STATIC_ASSERT(StackFrame::ARGUMENTS_ADAPTOR < 0x3fffu); + __ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0); + __ beq(&adaptor); + + // Check index against formal parameters count limit passed in + // through register r3. Use unsigned comparison to get negative + // check for free. + __ cmpl(r4, r3); + __ bge(&slow); + + // Read the argument from the stack and return it. + __ sub(r6, r3, r4); + __ SmiToPtrArrayOffset(r6, r6); + __ add(r6, fp, r6); + __ LoadP(r3, MemOperand(r6, kDisplacement)); + __ blr(); + + // Arguments adaptor case: Check index against actual arguments + // limit found in the arguments adaptor frame. Use unsigned + // comparison to get negative check for free. + __ bind(&adaptor); + __ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ cmpl(r4, r3); + __ bge(&slow); + + // Read the argument from the adaptor frame and return it. + __ sub(r6, r3, r4); + __ SmiToPtrArrayOffset(r6, r6); + __ add(r6, r5, r6); + __ LoadP(r3, MemOperand(r6, kDisplacement)); + __ blr(); + + // Slow-case: Handle non-smi or out-of-bounds access to arguments + // by calling the runtime system. + __ bind(&slow); + __ push(r4); + __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); +} + + +void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) { + // sp[0] : number of parameters + // sp[1] : receiver displacement + // sp[2] : function + + // Check if the calling frame is an arguments adaptor frame. + Label runtime; + __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ LoadP(r5, MemOperand(r6, StandardFrameConstants::kContextOffset)); + STATIC_ASSERT(StackFrame::ARGUMENTS_ADAPTOR < 0x3fffu); + __ CmpSmiLiteral(r5, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0); + __ bne(&runtime); + + // Patch the arguments.length and the parameters pointer in the current frame. + __ LoadP(r5, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ StoreP(r5, MemOperand(sp, 0 * kPointerSize)); + __ SmiToPtrArrayOffset(r5, r5); + __ add(r6, r6, r5); + __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset)); + __ StoreP(r6, MemOperand(sp, 1 * kPointerSize)); + + __ bind(&runtime); + __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); +} + + +void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { + // Stack layout: + // sp[0] : number of parameters (tagged) + // sp[1] : address of receiver argument + // sp[2] : function + // Registers used over whole function: + // r9 : allocated object (tagged) + // r22 : mapped parameter count (tagged) + + __ LoadP(r4, MemOperand(sp, 0 * kPointerSize)); + // r4 = parameter count (tagged) + + // Check if the calling frame is an arguments adaptor frame. + Label runtime; + Label adaptor_frame, try_allocate; + __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ LoadP(r5, MemOperand(r6, StandardFrameConstants::kContextOffset)); + STATIC_ASSERT(StackFrame::ARGUMENTS_ADAPTOR < 0x3fffu); + __ CmpSmiLiteral(r5, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0); + __ beq(&adaptor_frame); + + // No adaptor, parameter count = argument count. + __ mr(r5, r4); + __ b(&try_allocate); + + // We have an adaptor frame. Patch the parameters pointer. 
+ __ bind(&adaptor_frame); + __ LoadP(r5, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ SmiToPtrArrayOffset(r7, r5); + __ add(r6, r6, r7); + __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset)); + __ StoreP(r6, MemOperand(sp, 1 * kPointerSize)); + + // r4 = parameter count (tagged) + // r5 = argument count (tagged) + // Compute the mapped parameter count = min(r4, r5) in r4. + Label skip; + __ cmp(r4, r5); + __ blt(&skip); + __ mr(r4, r5); + __ bind(&skip); + + __ bind(&try_allocate); + + // Compute the sizes of backing store, parameter map, and arguments object. + // 1. Parameter map, has 2 extra words containing context and backing store. + const int kParameterMapHeaderSize = + FixedArray::kHeaderSize + 2 * kPointerSize; + // If there are no mapped parameters, we do not need the parameter_map. + Label skip2, skip3; + __ CmpSmiLiteral(r4, Smi::FromInt(0), r0); + __ bne(&skip2); + __ li(r22, Operand::Zero()); + __ b(&skip3); + __ bind(&skip2); + __ SmiToPtrArrayOffset(r22, r4); + __ addi(r22, r22, Operand(kParameterMapHeaderSize)); + __ bind(&skip3); + + // 2. Backing store. + __ SmiToPtrArrayOffset(r7, r5); + __ add(r22, r22, r7); + __ addi(r22, r22, Operand(FixedArray::kHeaderSize)); + + // 3. Arguments object. + __ addi(r22, r22, Operand(Heap::kArgumentsObjectSize)); + + // Do the allocation of all three objects in one go. + __ AllocateInNewSpace(r22, r3, r6, r7, &runtime, TAG_OBJECT); + + // r3 = address of new object(s) (tagged) + // r5 = argument count (tagged) + // Get the arguments boilerplate from the current native context into r4. + const int kNormalOffset = + Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX); + const int kAliasedOffset = + Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX); + + __ LoadP(r7, MemOperand(r20, + Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); + __ LoadP(r7, FieldMemOperand(r7, GlobalObject::kNativeContextOffset)); + Label skip4, skip5; + __ cmpi(r4, Operand::Zero()); + __ bne(&skip4); + __ LoadP(r7, MemOperand(r7, kNormalOffset)); + __ b(&skip5); + __ bind(&skip4); + __ LoadP(r7, MemOperand(r7, kAliasedOffset)); + __ bind(&skip5); + + // r3 = address of new object (tagged) + // r4 = mapped parameter count (tagged) + // r5 = argument count (tagged) + // r7 = address of boilerplate object (tagged) + // Copy the JS object part. + for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) { + __ LoadP(r6, FieldMemOperand(r7, i)); + __ StoreP(r6, FieldMemOperand(r3, i), r0); + } + + // Set up the callee in-object property. + STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); + __ LoadP(r6, MemOperand(sp, 2 * kPointerSize)); + const int kCalleeOffset = JSObject::kHeaderSize + + Heap::kArgumentsCalleeIndex * kPointerSize; + __ StoreP(r6, FieldMemOperand(r3, kCalleeOffset), r0); + + // Use the length (smi tagged) and set that as an in-object property too. + STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); + const int kLengthOffset = JSObject::kHeaderSize + + Heap::kArgumentsLengthIndex * kPointerSize; + __ StoreP(r5, FieldMemOperand(r3, kLengthOffset), r0); + + // Set up the elements pointer in the allocated arguments object. + // If we allocated a parameter map, r7 will point there, otherwise + // it will point to the backing store. 
+ __ addi(r7, r3, Operand(Heap::kArgumentsObjectSize));
+ __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
+
+ // r3 = address of new object (tagged)
+ // r4 = mapped parameter count (tagged)
+ // r5 = argument count (tagged)
+ // r7 = address of parameter map or backing store (tagged)
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map, skip6;
+ __ CmpSmiLiteral(r4, Smi::FromInt(0), r0);
+ __ bne(&skip6);
+ // Move backing store address to r6, because it is
+ // expected there when filling in the unmapped arguments.
+ __ mr(r6, r7);
+ __ b(&skip_parameter_map);
+ __ bind(&skip6);
+
+ __ LoadRoot(r9, Heap::kNonStrictArgumentsElementsMapRootIndex);
+ __ StoreP(r9, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
+ __ AddSmiLiteral(r9, r4, Smi::FromInt(2), r0);
+ __ StoreP(r9, FieldMemOperand(r7, FixedArray::kLengthOffset), r0);
+ __ StoreP(r20, FieldMemOperand(r7,
+ FixedArray::kHeaderSize + 0 * kPointerSize),
+ r0);
+ __ SmiToPtrArrayOffset(r9, r4);
+ __ add(r9, r7, r9);
+ __ addi(r9, r9, Operand(kParameterMapHeaderSize));
+ __ StoreP(r9, FieldMemOperand(r7,
+ FixedArray::kHeaderSize + 1 * kPointerSize),
+ r0);
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+ // The mapped parameters thus need to get indices
+ // MIN_CONTEXT_SLOTS+parameter_count-1 ..
+ // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+ // We loop from right to left.
+ Label parameters_loop, parameters_test;
+ __ mr(r9, r4);
+ __ LoadP(r22, MemOperand(sp, 0 * kPointerSize));
+ __ AddSmiLiteral(r22, r22, Smi::FromInt(Context::MIN_CONTEXT_SLOTS), r0);
+ __ sub(r22, r22, r4);
+ __ LoadRoot(r10, Heap::kTheHoleValueRootIndex);
+ __ SmiToPtrArrayOffset(r6, r9);
+ __ add(r6, r7, r6);
+ __ addi(r6, r6, Operand(kParameterMapHeaderSize));
+
+ // r9 = loop variable (tagged)
+ // r4 = mapping index (tagged)
+ // r6 = address of backing store (tagged)
+ // r7 = address of parameter map (tagged)
+ // r8 = temporary scratch (a.o., for address calculation)
+ // r10 = the hole value
+ __ b(&parameters_test);
+
+ __ bind(&parameters_loop);
+ __ SubSmiLiteral(r9, r9, Smi::FromInt(1), r0);
+ __ SmiToPtrArrayOffset(r8, r9);
+ __ addi(r8, r8, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+ __ StorePX(r22, MemOperand(r8, r7));
+ __ subi(r8, r8, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+ __ StorePX(r10, MemOperand(r8, r6));
+ __ AddSmiLiteral(r22, r22, Smi::FromInt(1), r0);
+ __ bind(&parameters_test);
+ __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);
+ __ bne(&parameters_loop);
+
+ __ bind(&skip_parameter_map);
+ // r5 = argument count (tagged)
+ // r6 = address of backing store (tagged)
+ // r8 = scratch
+ // Copy arguments header and remaining slots (if there are any).
+ __ LoadRoot(r8, Heap::kFixedArrayMapRootIndex);
+ __ StoreP(r8, FieldMemOperand(r6, FixedArray::kMapOffset), r0);
+ __ StoreP(r5, FieldMemOperand(r6, FixedArray::kLengthOffset), r0);
+
+ Label arguments_loop, arguments_test;
+ __ mr(r22, r4);
+ __ LoadP(r7, MemOperand(sp, 1 * kPointerSize));
+ __ SmiToPtrArrayOffset(r8, r22);
+ __ sub(r7, r7, r8);
+ __ b(&arguments_test);
+
+ __ bind(&arguments_loop);
+ __ subi(r7, r7, Operand(kPointerSize));
+ __ LoadP(r9, MemOperand(r7, 0));
+ __ SmiToPtrArrayOffset(r8, r22);
+ __ add(r8, r6, r8);
+ __ StoreP(r9, FieldMemOperand(r8, FixedArray::kHeaderSize), r0);
+ __ AddSmiLiteral(r22, r22, Smi::FromInt(1), r0);
+
+ __ bind(&arguments_test);
+ __ cmp(r22, r5);
+ __ blt(&arguments_loop);
+
+ // Return and remove the on-stack parameters.
+ __ addi(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ // r5 = argument count (tagged)
+ __ bind(&runtime);
+ __ StoreP(r5, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
+ // sp[0] : number of parameters
+ // sp[4] : receiver displacement
+ // sp[8] : function
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, try_allocate, runtime;
+ __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset));
+ STATIC_ASSERT(StackFrame::ARGUMENTS_ADAPTOR < 0x3fffu);
+ __ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ beq(&adaptor_frame);
+
+ // Get the length from the frame.
+ __ LoadP(r4, MemOperand(sp, 0));
+ __ b(&try_allocate);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ LoadP(r4, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ StoreP(r4, MemOperand(sp, 0));
+ __ SmiToPtrArrayOffset(r6, r4);
+ __ add(r6, r5, r6);
+ __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ StoreP(r6, MemOperand(sp, 1 * kPointerSize));
+
+ // Try the new space allocation. Start out with computing the size
+ // of the arguments object and the elements array in words.
+ Label add_arguments_object;
+ __ bind(&try_allocate);
+ __ cmpi(r4, Operand(0, RelocInfo::NONE));
+ __ beq(&add_arguments_object);
+ __ SmiUntag(r4);
+ __ addi(r4, r4, Operand(FixedArray::kHeaderSize / kPointerSize));
+ __ bind(&add_arguments_object);
+ __ addi(r4, r4, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
+
+ // Do the allocation of both objects in one go.
+ __ AllocateInNewSpace(r4,
+ r3,
+ r5,
+ r6,
+ &runtime,
+ static_cast<AllocationFlags>(TAG_OBJECT |
+ SIZE_IN_WORDS));
+
+ // Get the arguments boilerplate from the current native context.
+ __ LoadP(r7,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ LoadP(r7, FieldMemOperand(r7, GlobalObject::kNativeContextOffset));
+ __ LoadP(r7, MemOperand(r7, Context::SlotOffset(
+ Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
+
+ // Copy the JS object part.
+ __ CopyFields(r3, r7, r6.bit(), JSObject::kHeaderSize / kPointerSize);
+
+ // Get the length (smi tagged) and set that as an in-object property too.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ __ LoadP(r4, MemOperand(sp, 0 * kPointerSize));
+ __ StoreP(r4, FieldMemOperand(r3, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize),
+ r0);
+
+ // If there are no actual arguments, we're done.
+ Label done;
+ __ cmpi(r4, Operand(0, RelocInfo::NONE));
+ __ beq(&done);
+
+ // Get the parameters pointer from the stack.
+ __ LoadP(r5, MemOperand(sp, 1 * kPointerSize));
+
+ // Set up the elements pointer in the allocated arguments object and
+ // initialize the header in the elements fixed array.
+ __ addi(r7, r3, Operand(Heap::kArgumentsObjectSizeStrict));
+ __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
+ __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
+ __ StoreP(r6, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
+ __ StoreP(r4, FieldMemOperand(r7, FixedArray::kLengthOffset), r0);
+ // Untag the length for the loop.
+ __ SmiUntag(r4);
+
+ // Copy the fixed array slots.
+ Label loop;
+ // Set up r7 to point to the first array slot.
+ __ addi(r7, r7, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ bind(&loop);
+ // Pre-decrement r5 with kPointerSize on each iteration.
+ // Pre-decrement in order to skip receiver.
+ __ LoadPU(r6, MemOperand(r5, -kPointerSize));
+ // Post-increment r7 with kPointerSize on each iteration.
+ __ StoreP(r6, MemOperand(r7));
+ __ addi(r7, r7, Operand(kPointerSize));
+ __ subi(r4, r4, Operand(1));
+ __ cmpi(r4, Operand(0, RelocInfo::NONE));
+ __ bne(&loop);
+
+ // Return and remove the on-stack parameters.
+ __ bind(&done);
+ __ addi(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+}
+
+
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+ // Just jump directly to runtime if native RegExp is not selected at compile
+ // time, or if the regexp entry in generated code is turned off by a runtime
+ // switch or at compilation.
+#ifdef V8_INTERPRETED_REGEXP
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#else // V8_INTERPRETED_REGEXP
+
+ // Stack frame on entry.
+ // sp[0]: last_match_info (expected JSArray)
+ // sp[4]: previous index
+ // sp[8]: subject string
+ // sp[12]: JSRegExp object
+
+ const int kLastMatchInfoOffset = 0 * kPointerSize;
+ const int kPreviousIndexOffset = 1 * kPointerSize;
+ const int kSubjectOffset = 2 * kPointerSize;
+ const int kJSRegExpOffset = 3 * kPointerSize;
+
+ Label runtime, invoke_regexp, br_over, encoding_type_UC16;
+
+ // Allocation of registers for this function. These are in callee save
+ // registers and will be preserved by the call to the native RegExp code, as
+ // this code is called using the normal C calling convention. When calling
+ // directly from generated code the native RegExp code will not do a GC and
+ // therefore the contents of these registers are safe to use after the call.
+ Register subject = r26;
+ Register regexp_data = r27;
+ Register last_match_info_elements = r28;
+ Register code = r29;
+
+ // Ensure register assignments are consistent with callee save masks
+ ASSERT(subject.bit() & (kCalleeSaved & kRegExpCalleeSaved));
+ ASSERT(regexp_data.bit() & (kCalleeSaved & kRegExpCalleeSaved));
+ ASSERT(last_match_info_elements.bit() & (kCalleeSaved & kRegExpCalleeSaved));
+ ASSERT(code.bit() & (kCalleeSaved & kRegExpCalleeSaved));
+
+ // Ensure that a RegExp stack is allocated.
+ Isolate* isolate = masm->isolate(); + ExternalReference address_of_regexp_stack_memory_address = + ExternalReference::address_of_regexp_stack_memory_address(isolate); + ExternalReference address_of_regexp_stack_memory_size = + ExternalReference::address_of_regexp_stack_memory_size(isolate); + __ mov(r3, Operand(address_of_regexp_stack_memory_size)); + __ LoadP(r3, MemOperand(r3, 0)); + __ cmpi(r3, Operand::Zero()); + __ beq(&runtime); + + // Check that the first argument is a JSRegExp object. + __ LoadP(r3, MemOperand(sp, kJSRegExpOffset)); + STATIC_ASSERT(kSmiTag == 0); + __ JumpIfSmi(r3, &runtime); + __ CompareObjectType(r3, r4, r4, JS_REGEXP_TYPE); + __ bne(&runtime); + + // Check that the RegExp has been compiled (data contains a fixed array). + __ LoadP(regexp_data, FieldMemOperand(r3, JSRegExp::kDataOffset)); + if (FLAG_debug_code) { + STATIC_ASSERT(kSmiTagMask == 1); + __ andi(r0, regexp_data, Operand(kSmiTagMask)); + __ Check(ne, "Unexpected type for RegExp data, FixedArray expected", cr0); + __ CompareObjectType(regexp_data, r3, r3, FIXED_ARRAY_TYPE); + __ Check(eq, "Unexpected type for RegExp data, FixedArray expected"); + } + + // regexp_data: RegExp data (FixedArray) + // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. + __ LoadP(r3, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset)); + // ASSERT(Smi::FromInt(JSRegExp::IRREGEXP) < (char *)0xffffu); + __ CmpSmiLiteral(r3, Smi::FromInt(JSRegExp::IRREGEXP), r0); + __ bne(&runtime); + + // regexp_data: RegExp data (FixedArray) + // Check that the number of captures fit in the static offsets vector buffer. + __ LoadP(r5, + FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); + // Calculate number of capture registers (number_of_captures + 1) * 2. + __ SmiToShortArrayOffset(r5, r5); + __ addi(r5, r5, Operand(2)); + // Check that the static offsets vector buffer is large enough. + // STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize < 0xffffu); + __ cmpli(r5, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize)); + __ bgt(&runtime); + + // r5: Number of capture registers + // regexp_data: RegExp data (FixedArray) + // Check that the second argument is a string. + __ LoadP(subject, MemOperand(sp, kSubjectOffset)); + __ JumpIfSmi(subject, &runtime); + Condition is_string = masm->IsObjectStringType(subject, r3); + __ b(NegateCondition(is_string), &runtime, cr0); + // Get the length of the string to r6. + __ LoadP(r6, FieldMemOperand(subject, String::kLengthOffset)); + + // r5: Number of capture registers + // r6: Length of subject string as a smi + // subject: Subject string + // regexp_data: RegExp data (FixedArray) + // Check that the third argument is a positive smi less than the subject + // string length. A negative value will be greater (unsigned comparison). + __ LoadP(r3, MemOperand(sp, kPreviousIndexOffset)); + __ JumpIfNotSmi(r3, &runtime); + __ cmpl(r6, r3); + __ ble(&runtime); + + // r5: Number of capture registers + // subject: Subject string + // regexp_data: RegExp data (FixedArray) + // Check that the fourth object is a JSArray object. + __ LoadP(r3, MemOperand(sp, kLastMatchInfoOffset)); + __ JumpIfSmi(r3, &runtime); + __ CompareObjectType(r3, r4, r4, JS_ARRAY_TYPE); + __ bne(&runtime); + // Check that the JSArray is in fast case. 
+ __ LoadP(last_match_info_elements, + FieldMemOperand(r3, JSArray::kElementsOffset)); + __ LoadP(r3, FieldMemOperand(last_match_info_elements, + HeapObject::kMapOffset)); + __ CompareRoot(r3, Heap::kFixedArrayMapRootIndex); + __ bne(&runtime); + // Check that the last match info has space for the capture registers and the + // additional information. + __ LoadP(r3, + FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset)); + __ addi(r5, r5, Operand(RegExpImpl::kLastMatchOverhead)); + __ SmiUntag(r0, r3); + __ cmp(r5, r0); + __ bgt(&runtime); + + // Reset offset for possibly sliced string. + __ li(r11, Operand::Zero()); + // subject: Subject string + // regexp_data: RegExp data (FixedArray) + // Check the representation and encoding of the subject string. + Label seq_string; + __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset)); + __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset)); + // First check for flat string. None of the following string type tests will + // succeed if subject is not a string or a short external string. + STATIC_ASSERT((kIsNotStringMask | + kStringRepresentationMask | + kShortExternalStringMask) == 0x93); + __ andi(r4, r3, Operand(kIsNotStringMask | + kStringRepresentationMask | + kShortExternalStringMask)); + STATIC_ASSERT((kStringTag | kSeqStringTag) == 0); + __ beq(&seq_string, cr0); + + // subject: Subject string + // regexp_data: RegExp data (FixedArray) + // r4: whether subject is a string and if yes, its string representation + // Check for flat cons string or sliced string. + // A flat cons string is a cons string where the second part is the empty + // string. In that case the subject string is just the first part of the cons + // string. Also in this case the first part of the cons string is known to be + // a sequential string or an external string. + // In the case of a sliced string its offset has to be taken into account. + Label cons_string, external_string, check_encoding; + STATIC_ASSERT(kConsStringTag < kExternalStringTag); + STATIC_ASSERT(kSlicedStringTag > kExternalStringTag); + STATIC_ASSERT(kIsNotStringMask > kExternalStringTag); + STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag); + STATIC_ASSERT(kExternalStringTag < 0xffffu); + __ cmpi(r4, Operand(kExternalStringTag)); + __ blt(&cons_string); + __ beq(&external_string); + + // Catch non-string subject or short external string. + STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0); + STATIC_ASSERT((kNotStringTag | kShortExternalStringTag) < 0xffffu); + __ andi(r0, r4, Operand(kIsNotStringMask | kShortExternalStringMask)); + __ bne(&runtime, cr0); + + // String is sliced. + __ LoadP(r11, FieldMemOperand(subject, SlicedString::kOffsetOffset)); + __ SmiUntag(r11); + __ LoadP(subject, FieldMemOperand(subject, SlicedString::kParentOffset)); + // r11: offset of sliced string, smi-tagged. + __ b(&check_encoding); + // String is a cons string, check whether it is flat. + __ bind(&cons_string); + __ LoadP(r3, FieldMemOperand(subject, ConsString::kSecondOffset)); + __ CompareRoot(r3, Heap::kEmptyStringRootIndex); + __ bne(&runtime); + __ LoadP(subject, FieldMemOperand(subject, ConsString::kFirstOffset)); + // Is first part of cons or parent of slice a flat string? 
+ __ bind(&check_encoding);
+ __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kSeqStringTag == 0);
+ STATIC_ASSERT(kStringRepresentationMask == 3);
+ __ andi(r0, r3, Operand(kStringRepresentationMask));
+ __ bne(&external_string, cr0);
+
+ __ bind(&seq_string);
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // r3: Instance type of subject string
+ STATIC_ASSERT(4 == kAsciiStringTag);
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ // Find the code object based on the assumptions above.
+ STATIC_ASSERT(kStringEncodingMask == 4);
+ __ ExtractBitMask(r6, r3, kStringEncodingMask, SetRC);
+ __ beq(&encoding_type_UC16, cr0);
+ __ LoadP(code, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
+ __ b(&br_over);
+ __ bind(&encoding_type_UC16);
+ __ LoadP(code, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
+ __ bind(&br_over);
+
+ // Check that the irregexp code has been generated for the actual string
+ // encoding. If it has, the field contains a code object, otherwise it contains
+ // a smi (code flushing support).
+ __ JumpIfSmi(code, &runtime);
+
+ // r6: encoding of subject string (1 if ASCII, 0 if two_byte);
+ // code: Address of generated regexp code
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // Load used arguments before starting to push arguments for call to native
+ // RegExp code to avoid handling changing stack height.
+ __ LoadP(r4, MemOperand(sp, kPreviousIndexOffset));
+ __ SmiUntag(r4);
+
+ // r4: previous index
+ // r6: encoding of subject string (1 if ASCII, 0 if two_byte);
+ // code: Address of generated regexp code
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // All checks done. Now push arguments for native regexp code.
+ __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r3, r5);
+
+ // Isolates: note we add an additional parameter here (isolate pointer).
+ const int kRegExpExecuteArguments = 10;
+ const int kParameterRegisters = 8;
+ __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
+
+ // Stack pointer now points to cell where return address is to be written.
+ // Arguments are before that on the stack or in registers.
+
+ // Argument 10 (in stack parameter area): Pass current isolate address.
+ __ mov(r3, Operand(ExternalReference::isolate_address()));
+ __ StoreP(r3, MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize));
+
+ // Argument 9 is a dummy that reserves the space used for
+ // the return address added by the ExitFrame in native calls.
+
+ // Argument 8 (r10): Indicate that this is a direct call from JavaScript.
+ __ li(r10, Operand(1));
+
+ // Argument 7 (r9): Start (high end) of backtracking stack memory area.
+ __ mov(r3, Operand(address_of_regexp_stack_memory_address));
+ __ LoadP(r3, MemOperand(r3, 0));
+ __ mov(r5, Operand(address_of_regexp_stack_memory_size));
+ __ LoadP(r5, MemOperand(r5, 0));
+ __ add(r9, r3, r5);
+
+ // Argument 6 (r8): Set the number of capture registers to zero to force
+ // global regexps to behave as non-global. This does not affect non-global
+ // regexps.
+ __ li(r8, Operand::Zero());
+
+ // Argument 5 (r7): static offsets vector buffer.
+ __ mov(r7, + Operand(ExternalReference::address_of_static_offsets_vector(isolate))); + + // For arguments 4 (r6) and 3 (r5) get string length, calculate start of + // string data and calculate the shift of the index (0 for ASCII and 1 for + // two byte). + __ addi(r22, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag)); + __ xori(r6, r6, Operand(1)); + // Load the length from the original subject string from the previous stack + // frame. Therefore we have to use fp, which points exactly to two pointer + // sizes below the previous sp. (Because creating a new stack frame pushes + // the previous fp onto the stack and moves up sp by 2 * kPointerSize.) + __ LoadP(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize)); + // If slice offset is not 0, load the length from the original sliced string. + // Argument 4, r6: End of string data + // Argument 3, r5: Start of string data + // Prepare start and end index of the input. + __ ShiftLeft(r11, r11, r6); + __ add(r11, r22, r11); + __ ShiftLeft(r5, r4, r6); + __ add(r5, r11, r5); + + __ LoadP(r22, FieldMemOperand(subject, String::kLengthOffset)); + __ SmiUntag(r22); + __ ShiftLeft(r6, r22, r6); + __ add(r6, r11, r6); + + // Argument 2 (r4): Previous index. + // Already there + + // Argument 1 (r3): Subject string. + __ mr(r3, subject); + + // Locate the code entry and call it. + __ addi(code, code, Operand(Code::kHeaderSize - kHeapObjectTag)); + + +#if ABI_USES_FUNCTION_DESCRIPTORS && defined(USE_SIMULATOR) + // Even Simulated AIX/PPC64 Linux uses a function descriptor for the + // RegExp routine. Extract the instruction address here since + // DirectCEntryStub::GenerateCall will not do it for calls out to + // what it thinks is C code compiled for the simulator/host + // platform. + __ LoadP(code, MemOperand(code, 0)); // Instruction address +#endif + + DirectCEntryStub stub; + stub.GenerateCall(masm, code); + + __ LeaveExitFrame(false, no_reg); + + // r3: result + // subject: subject string (callee saved) + // regexp_data: RegExp data (callee saved) + // last_match_info_elements: Last match info elements (callee saved) + + // Check the result. + Label success; + + __ cmpi(r3, Operand(1)); + // We expect exactly one result since we force the called regexp to behave + // as non-global. + __ beq(&success); + Label failure; + __ cmpi(r3, Operand(NativeRegExpMacroAssembler::FAILURE)); + __ beq(&failure); + __ cmpi(r3, Operand(NativeRegExpMacroAssembler::EXCEPTION)); + // If not exception it can only be retry. Handle that in the runtime system. + __ bne(&runtime); + // Result must now be exception. If there is no pending exception already a + // stack overflow (on the backtrack stack) was detected in RegExp code but + // haven't created the exception yet. Handle that in the runtime system. + // TODO(592): Rerunning the RegExp to get the stack overflow exception. + __ mov(r4, Operand(isolate->factory()->the_hole_value())); + __ mov(r5, Operand(ExternalReference(Isolate::kPendingExceptionAddress, + isolate))); + __ LoadP(r3, MemOperand(r5, 0)); + __ cmp(r3, r4); + __ beq(&runtime); + + __ StoreP(r4, MemOperand(r5, 0)); // Clear pending exception. + + // Check if the exception is a termination. If so, throw as uncatchable. + __ CompareRoot(r3, Heap::kTerminationExceptionRootIndex); + + Label termination_exception; + __ beq(&termination_exception); + + __ Throw(r3); + + __ bind(&termination_exception); + __ ThrowUncatchable(r3); + + __ bind(&failure); + // For failure and exception return null. 
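+ // Dropping 4 * kPointerSize removes the stub's four stack arguments (JSRegExp
+ // object, subject string, previous index and last match info) before returning.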
+ __ mov(r3, Operand(masm->isolate()->factory()->null_value())); + __ addi(sp, sp, Operand(4 * kPointerSize)); + __ Ret(); + + // Process the result from the native regexp code. + __ bind(&success); + __ LoadP(r4, + FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); + // Calculate number of capture registers (number_of_captures + 1) * 2. + __ SmiToShortArrayOffset(r4, r4); + __ addi(r4, r4, Operand(2)); + + // r4: number of capture registers + // r26: subject string + // Store the capture count. + __ SmiTag(r5, r4); + __ StoreP(r5, FieldMemOperand(last_match_info_elements, + RegExpImpl::kLastCaptureCountOffset), r0); + // Store last subject and last input. + __ StoreP(subject, + FieldMemOperand(last_match_info_elements, + RegExpImpl::kLastSubjectOffset), r0); + __ mr(r5, subject); + __ RecordWriteField(last_match_info_elements, + RegExpImpl::kLastSubjectOffset, + r5, + r10, + kLRHasNotBeenSaved, + kDontSaveFPRegs); + __ StoreP(subject, + FieldMemOperand(last_match_info_elements, + RegExpImpl::kLastInputOffset), r0); + __ RecordWriteField(last_match_info_elements, + RegExpImpl::kLastInputOffset, + subject, + r10, + kLRHasNotBeenSaved, + kDontSaveFPRegs); + + // Get the static offsets vector filled by the native regexp code. + ExternalReference address_of_static_offsets_vector = + ExternalReference::address_of_static_offsets_vector(isolate); + __ mov(r5, Operand(address_of_static_offsets_vector)); + + // r4: number of capture registers + // r5: offsets vector + Label next_capture; + // Capture register counter starts from number of capture registers and + // counts down until wraping after zero. + __ addi(r3, + last_match_info_elements, + Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag - + kPointerSize)); + __ addi(r5, r5, Operand(-kIntSize)); // bias down for lwzu + __ mtctr(r4); + __ bind(&next_capture); + // Read the value from the static offsets vector buffer. + __ lwzu(r6, MemOperand(r5, kIntSize)); + // Store the smi value in the last match info. + __ SmiTag(r6); + __ StorePU(r6, MemOperand(r3, kPointerSize)); + __ bdnz(&next_capture); + + // Return last match info. + __ LoadP(r3, MemOperand(sp, kLastMatchInfoOffset)); + __ addi(sp, sp, Operand(4 * kPointerSize)); + __ Ret(); + + // External string. Short external strings have already been ruled out. + // r3: scratch + __ bind(&external_string); + __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset)); + __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset)); + if (FLAG_debug_code) { + // Assert that we do not have a cons or slice (indirect strings) here. + // Sequential strings have already been ruled out. + STATIC_ASSERT(kIsIndirectStringMask == 1); + __ andi(r0, r3, Operand(kIsIndirectStringMask)); + __ Assert(eq, "external string expected, but not found", cr0); + } + __ LoadP(subject, + FieldMemOperand(subject, ExternalString::kResourceDataOffset)); + // Move the pointer so that offset-wise, it looks like a sequential string. + STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize); + __ subi(subject, + subject, + Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + __ b(&seq_string); + + // Do the runtime call to execute the regexp. 
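+ // Every earlier check that cannot be completed inline (unsuitable subject
+ // strings, missing irregexp code, too little match info space, RETRY results
+ // and not-yet-materialized stack overflow exceptions) branches to this label.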
+ __ bind(&runtime); + __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); +#endif // V8_INTERPRETED_REGEXP +} + + +void RegExpConstructResultStub::Generate(MacroAssembler* masm) { + const int kMaxInlineLength = 100; + Label slowcase; + Label done; + Factory* factory = masm->isolate()->factory(); + + __ LoadP(r4, MemOperand(sp, kPointerSize * 2)); + __ JumpIfNotSmi(r4, &slowcase); + __ CmplSmiLiteral(r4, Smi::FromInt(kMaxInlineLength), r0); + __ bgt(&slowcase); + // Allocate RegExpResult followed by FixedArray with size in ebx. + // JSArray: [Map][empty properties][Elements][Length-smi][index][input] + // Elements: [Map][Length][..elements..] + // Size of JSArray with two in-object properties and the header of a + // FixedArray. + int objects_size = + (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize; + __ SmiUntag(r8, r4); + __ addi(r5, r8, Operand(objects_size)); + // Future optimization: defer tagging the result pointer for more + // efficient 64-bit memory accesses (due to alignment requirements + // on the memoperand offset). + __ AllocateInNewSpace( + r5, // In: Size, in words. + r3, // Out: Start of allocation (tagged). + r6, // Scratch register. + r7, // Scratch register. + &slowcase, + static_cast(TAG_OBJECT | SIZE_IN_WORDS)); + // r3: Start of allocated area, object-tagged. + // r4: Number of elements in array, as smi. + // r8: Number of elements, untagged. + + // Set JSArray map to global.regexp_result_map(). + // Set empty properties FixedArray. + // Set elements to point to FixedArray allocated right after the JSArray. + // Interleave operations for better latency. + __ LoadP(r5, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX)); + __ addi(r6, r3, Operand(JSRegExpResult::kSize)); + __ mov(r7, Operand(factory->empty_fixed_array())); + __ LoadP(r5, FieldMemOperand(r5, GlobalObject::kNativeContextOffset)); + __ StoreP(r6, FieldMemOperand(r3, JSObject::kElementsOffset), r0); + __ LoadP(r5, ContextOperand(r5, Context::REGEXP_RESULT_MAP_INDEX)); + __ StoreP(r7, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0); + __ StoreP(r5, FieldMemOperand(r3, HeapObject::kMapOffset), r0); + + // Set input, index and length fields from arguments. + __ LoadP(r4, MemOperand(sp, kPointerSize * 0)); + __ LoadP(r5, MemOperand(sp, kPointerSize * 1)); + __ LoadP(r9, MemOperand(sp, kPointerSize * 2)); + __ StoreP(r4, FieldMemOperand(r3, JSRegExpResult::kInputOffset), r0); + __ StoreP(r5, FieldMemOperand(r3, JSRegExpResult::kIndexOffset), r0); + __ StoreP(r9, FieldMemOperand(r3, JSArray::kLengthOffset), r0); + + // Fill out the elements FixedArray. + // r3: JSArray, tagged. + // r6: FixedArray, tagged. + // r8: Number of elements in array, untagged. + + // Set map. + __ mov(r5, Operand(factory->fixed_array_map())); + __ StoreP(r5, FieldMemOperand(r6, HeapObject::kMapOffset), r0); + // Set FixedArray length. + __ SmiTag(r9, r8); + __ StoreP(r9, FieldMemOperand(r6, FixedArray::kLengthOffset), r0); + // Fill contents of fixed-array with undefined. + __ LoadRoot(r5, Heap::kUndefinedValueRootIndex); + __ addi(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + // Fill fixed array elements with undefined. + // r3: JSArray, tagged. + // r5: undefined. + // r6: Start of elements in FixedArray. + // r8: Number of elements to fill. + Label loop; + __ cmpi(r8, Operand::Zero()); + __ bind(&loop); + __ ble(&done); // Jump if r8 is negative or zero. 
+ __ subi(r8, r8, Operand(1)); + __ ShiftLeftImm(ip, r8, Operand(kPointerSizeLog2)); + __ StorePX(r5, MemOperand(ip, r6)); + __ cmpi(r8, Operand::Zero()); + __ b(&loop); + + __ bind(&done); + __ addi(sp, sp, Operand(3 * kPointerSize)); + __ Ret(); + + __ bind(&slowcase); + __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1); +} + + +static void GenerateRecordCallTarget(MacroAssembler* masm) { + // Cache the called function in a global property cell. Cache states + // are uninitialized, monomorphic (indicated by a JSFunction), and + // megamorphic. + // r4 : the function to call + // r5 : cache cell for call target + Label initialize, done; + const Register scratch = r6; + + ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()), + masm->isolate()->heap()->undefined_value()); + ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()), + masm->isolate()->heap()->the_hole_value()); + + // Load the cache state into scratch. + __ LoadP(scratch, FieldMemOperand(r5, JSGlobalPropertyCell::kValueOffset)); + + // A monomorphic cache hit or an already megamorphic state: invoke the + // function without changing the state. + __ cmp(scratch, r4); + __ beq(&done); + __ CompareRoot(scratch, Heap::kUndefinedValueRootIndex); + __ beq(&done); + + // A monomorphic miss (i.e, here the cache is not uninitialized) goes + // megamorphic. + __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex); + __ beq(&initialize); + // MegamorphicSentinel is an immortal immovable object (undefined) so no + // write-barrier is needed. + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ StoreP(ip, FieldMemOperand(r5, JSGlobalPropertyCell::kValueOffset), r0); + __ b(&done); + + // An uninitialized cache is patched with the function. + __ bind(&initialize); + __ StoreP(r4, FieldMemOperand(r5, JSGlobalPropertyCell::kValueOffset), r0); + // No need for a write barrier here - cells are rescanned. + + __ bind(&done); +} + + +void CallFunctionStub::Generate(MacroAssembler* masm) { + // r4 : the function to call + // r5 : cache cell for call target + Label slow, non_function; + + // The receiver might implicitly be the global object. This is + // indicated by passing the hole as the receiver to the call + // function stub. + if (ReceiverMightBeImplicit()) { + Label call; + // Get the receiver from the stack. + // function, receiver [, arguments] + __ LoadP(r7, MemOperand(sp, argc_ * kPointerSize), r0); + // Call as function is indicated with the hole. + __ CompareRoot(r7, Heap::kTheHoleValueRootIndex); + __ bne(&call); + // Patch the receiver on the stack with the global receiver object. + __ LoadP(r6, + MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); + __ LoadP(r6, FieldMemOperand(r6, GlobalObject::kGlobalReceiverOffset)); + __ StoreP(r6, MemOperand(sp, argc_ * kPointerSize), r0); + __ bind(&call); + } + + // Check that the function is really a JavaScript function. + // r4: pushed function (to be verified) + __ JumpIfSmi(r4, &non_function); + // Get the map of the function object. + __ CompareObjectType(r4, r6, r6, JS_FUNCTION_TYPE); + __ bne(&slow); + + if (RecordCallTarget()) { + GenerateRecordCallTarget(masm); + } + + // Fast-case: Invoke the function now. 
+ // r4: pushed function + ParameterCount actual(argc_); + + if (ReceiverMightBeImplicit()) { + Label call_as_function; + __ CompareRoot(r7, Heap::kTheHoleValueRootIndex); + __ beq(&call_as_function); + __ InvokeFunction(r4, + actual, + JUMP_FUNCTION, + NullCallWrapper(), + CALL_AS_METHOD); + __ bind(&call_as_function); + } + __ InvokeFunction(r4, + actual, + JUMP_FUNCTION, + NullCallWrapper(), + CALL_AS_FUNCTION); + + // Slow-case: Non-function called. + __ bind(&slow); + if (RecordCallTarget()) { + // If there is a call target cache, mark it megamorphic in the + // non-function case. MegamorphicSentinel is an immortal immovable + // object (undefined) so no write barrier is needed. + ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()), + masm->isolate()->heap()->undefined_value()); + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ StoreP(ip, FieldMemOperand(r5, JSGlobalPropertyCell::kValueOffset), r0); + } + // Check for function proxy. + STATIC_ASSERT(JS_FUNCTION_PROXY_TYPE < 0xffffu); + __ cmpi(r6, Operand(JS_FUNCTION_PROXY_TYPE)); + __ bne(&non_function); + __ push(r4); // put proxy as additional argument + __ li(r3, Operand(argc_ + 1)); + __ li(r5, Operand::Zero()); + __ GetBuiltinEntry(r6, Builtins::CALL_FUNCTION_PROXY); + __ SetCallKind(r8, CALL_AS_METHOD); + { + Handle adaptor = + masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); + __ Jump(adaptor, RelocInfo::CODE_TARGET); + } + + // CALL_NON_FUNCTION expects the non-function callee as receiver (instead + // of the original receiver from the call site). + __ bind(&non_function); + __ StoreP(r4, MemOperand(sp, argc_ * kPointerSize), r0); + __ li(r3, Operand(argc_)); // Set up the number of arguments. + __ li(r5, Operand::Zero()); + __ GetBuiltinEntry(r6, Builtins::CALL_NON_FUNCTION); + __ SetCallKind(r8, CALL_AS_METHOD); + __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + RelocInfo::CODE_TARGET); +} + + +void CallConstructStub::Generate(MacroAssembler* masm) { + // r3 : number of arguments + // r4 : the function to call + // r5 : cache cell for call target + Label slow, non_function_call; + + // Check that the function is not a smi. + __ JumpIfSmi(r4, &non_function_call); + // Check that the function is a JSFunction. + __ CompareObjectType(r4, r6, r6, JS_FUNCTION_TYPE); + __ bne(&slow); + + if (RecordCallTarget()) { + GenerateRecordCallTarget(masm); + } + + // Jump to the function-specific construct stub. + __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); + __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kConstructStubOffset)); + __ addi(r0, r5, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ Jump(r0); + + // r3: number of arguments + // r4: called object + // r6: object type + Label do_call; + __ bind(&slow); + STATIC_ASSERT(JS_FUNCTION_PROXY_TYPE < 0xffffu); + __ cmpi(r6, Operand(JS_FUNCTION_PROXY_TYPE)); + __ bne(&non_function_call); + __ GetBuiltinEntry(r6, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR); + __ b(&do_call); + + __ bind(&non_function_call); + __ GetBuiltinEntry(r6, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); + __ bind(&do_call); + // Set expected number of arguments to zero (not changing r3). + __ li(r5, Operand::Zero()); + __ SetCallKind(r8, CALL_AS_METHOD); + __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + RelocInfo::CODE_TARGET); +} + + +// Unfortunately you have to run without snapshots to see most of these +// names in the profile since most compare stubs end up in the snapshot. 
+void CompareStub::PrintName(StringStream* stream) { + ASSERT((lhs_.is(r3) && rhs_.is(r4)) || + (lhs_.is(r4) && rhs_.is(r3))); + const char* cc_name; + switch (cc_) { + case lt: cc_name = "LT"; break; + case gt: cc_name = "GT"; break; + case le: cc_name = "LE"; break; + case ge: cc_name = "GE"; break; + case eq: cc_name = "EQ"; break; + case ne: cc_name = "NE"; break; + default: cc_name = "UnknownCondition"; break; + } + bool is_equality = cc_ == eq || cc_ == ne; + stream->Add("CompareStub_%s", cc_name); + stream->Add(lhs_.is(r3) ? "_r3" : "_r4"); + stream->Add(rhs_.is(r3) ? "_r3" : "_r4"); + if (strict_ && is_equality) stream->Add("_STRICT"); + if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN"); + if (!include_number_compare_) stream->Add("_NO_NUMBER"); + if (!include_smi_compare_) stream->Add("_NO_SMI"); +} + + +int CompareStub::MinorKey() { + // Encode the three parameters in a unique 16 bit value. To avoid duplicate + // stubs the never NaN NaN condition is only taken into account if the + // condition is equals. + ASSERT(static_cast(cc_) < (1 << 12)); + ASSERT((lhs_.is(r3) && rhs_.is(r4)) || + (lhs_.is(r4) && rhs_.is(r3))); + return ConditionField::encode(static_cast(cc_)) + | RegisterField::encode(lhs_.is(r3)) + | StrictField::encode(strict_) + | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false) + | IncludeNumberCompareField::encode(include_number_compare_) + | IncludeSmiCompareField::encode(include_smi_compare_); +} + + +// StringCharCodeAtGenerator +void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { + Label flat_string; + Label ascii_string; + Label got_char_code; + Label sliced_string; + + // If the receiver is a smi trigger the non-string case. + __ JumpIfSmi(object_, receiver_not_string_); + + // Fetch the instance type of the receiver into result register. + __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); + __ lbz(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); + // If the receiver is not a string trigger the non-string case. + __ andi(r0, result_, Operand(kIsNotStringMask)); + __ bne(receiver_not_string_, cr0); + + // If the index is non-smi trigger the non-smi case. + __ JumpIfNotSmi(index_, &index_not_smi_); + __ bind(&got_smi_index_); + + // Check for index out of range. + __ LoadP(ip, FieldMemOperand(object_, String::kLengthOffset)); + __ cmpl(ip, index_); + __ ble(index_out_of_range_); + + __ SmiUntag(index_); + + StringCharLoadGenerator::Generate(masm, + object_, + index_, + result_, + &call_runtime_); + + __ SmiTag(result_, result_); + __ bind(&exit_); +} + + +void StringCharCodeAtGenerator::GenerateSlow( + MacroAssembler* masm, + const RuntimeCallHelper& call_helper) { + __ Abort("Unexpected fallthrough to CharCodeAt slow case"); + + // Index is not a smi. + __ bind(&index_not_smi_); + // If index is a heap number, try converting it to an integer. + __ CheckMap(index_, + result_, + Heap::kHeapNumberMapRootIndex, + index_not_number_, + DONT_DO_SMI_CHECK); + call_helper.BeforeCall(masm); + __ push(object_); + __ push(index_); // Consumed by runtime conversion function. + if (index_flags_ == STRING_INDEX_IS_NUMBER) { + __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); + } else { + ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); + // NumberToSmi discards numbers that are not exact integers. + __ CallRuntime(Runtime::kNumberToSmi, 1); + } + // Save the conversion result before the pop instructions below + // have a chance to overwrite it. 
+ __ Move(index_, r3); + __ pop(object_); + // Reload the instance type. + __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); + __ lbz(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); + call_helper.AfterCall(masm); + // If index is still not a smi, it must be out of range. + __ JumpIfNotSmi(index_, index_out_of_range_); + // Otherwise, return to the fast path. + __ b(&got_smi_index_); + + // Call runtime. We get here when the receiver is a string and the + // index is a number, but the code of getting the actual character + // is too complex (e.g., when the string needs to be flattened). + __ bind(&call_runtime_); + call_helper.BeforeCall(masm); + __ SmiTag(index_); + __ Push(object_, index_); + __ CallRuntime(Runtime::kStringCharCodeAt, 2); + __ Move(result_, r3); + call_helper.AfterCall(masm); + __ b(&exit_); + + __ Abort("Unexpected fallthrough from CharCodeAt slow case"); +} + + +// ------------------------------------------------------------------------- +// StringCharFromCodeGenerator + + void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { + // Fast case of Heap::LookupSingleCharacterStringFromCode. + ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1)); + __ LoadSmiLiteral(r0, Smi::FromInt(~String::kMaxAsciiCharCode)); + __ ori(r0, r0, Operand(kSmiTagMask)); + __ and_(r0, code_, r0); + __ cmpi(r0, Operand::Zero()); + __ bne(&slow_case_); + + __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); + // At this point code register contains smi tagged ASCII char code. + __ mr(r0, code_); + __ SmiToPtrArrayOffset(code_, code_); + __ add(result_, result_, code_); + __ mr(code_, r0); + __ LoadP(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); + __ CompareRoot(result_, Heap::kUndefinedValueRootIndex); + __ beq(&slow_case_); + __ bind(&exit_); +} + + +void StringCharFromCodeGenerator::GenerateSlow( + MacroAssembler* masm, + const RuntimeCallHelper& call_helper) { + __ Abort("Unexpected fallthrough to CharFromCode slow case"); + + __ bind(&slow_case_); + call_helper.BeforeCall(masm); + __ push(code_); + __ CallRuntime(Runtime::kCharFromCode, 1); + __ Move(result_, r3); + call_helper.AfterCall(masm); + __ b(&exit_); + + __ Abort("Unexpected fallthrough from CharFromCode slow case"); +} + + +// ------------------------------------------------------------------------- +// StringCharAtGenerator + +void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) { + char_code_at_generator_.GenerateFast(masm); + char_from_code_generator_.GenerateFast(masm); +} + + +void StringCharAtGenerator::GenerateSlow( + MacroAssembler* masm, + const RuntimeCallHelper& call_helper) { + char_code_at_generator_.GenerateSlow(masm, call_helper); + char_from_code_generator_.GenerateSlow(masm, call_helper); +} + + +void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, + Register dest, + Register src, + Register count, + Register scratch, + bool ascii) { + Label loop; + __ bind(&loop); + // This loop just copies one character at a time, as it is only used for very + // short strings. 
+ if (ascii) { + __ lbz(scratch, MemOperand(src)); + __ stb(scratch, MemOperand(dest)); + __ addi(src, src, Operand(1)); + __ addi(dest, dest, Operand(1)); + } else { + __ lhz(scratch, MemOperand(src)); + __ sth(scratch, MemOperand(dest)); + __ addi(src, src, Operand(2)); + __ addi(dest, dest, Operand(2)); + } + __ subi(count, count, Operand(1)); + __ cmpi(count, Operand::Zero()); + __ bgt(&loop); +} + + +enum CopyCharactersFlags { + COPY_ASCII = 1, + DEST_ALWAYS_ALIGNED = 2 +}; + + +// roohack - optimization opportunity here, stringcopy is important +// and the current version below is very dumb +void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm, + Register dest, + Register src, + Register count, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Register scratch5, + int flags) { + bool ascii = (flags & COPY_ASCII) != 0; + bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0; + + if (dest_always_aligned && FLAG_debug_code) { + // Check that destination is actually word aligned if the flag says + // that it is. + __ andi(r0, dest, Operand(kPointerAlignmentMask)); + __ Check(eq, "Destination of copy not aligned.", cr0); + } + + // Nothing to do for zero characters. + Label done; + if (!ascii) { // for non-ascii, double the length + __ add(count, count, count); + } + __ cmpi(count, Operand(0, RelocInfo::NONE)); + __ beq(&done); + + // Assume that you cannot read (or write) unaligned. + Label byte_loop; + __ add(count, dest, count); + Register limit = count; // Read until src equals this. + // Copy bytes from src to dst until dst hits limit. + __ bind(&byte_loop); + __ cmp(dest, limit); + __ bge(&done); + __ lbz(scratch1, MemOperand(src)); + __ addi(src, src, Operand(1)); + __ stb(scratch1, MemOperand(dest)); + __ addi(dest, dest, Operand(1)); + __ b(&byte_loop); + + __ bind(&done); +} + + +void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, + Register c1, + Register c2, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Register scratch5, + Label* not_found) { + // Register scratch3 is the general scratch register in this function. + Register scratch = scratch3; + + // Make sure that both characters are not digits as such strings has a + // different hash algorithm. Don't try to look for these in the symbol table. + Label not_array_index; + __ subi(scratch, c1, Operand(static_cast('0'))); + __ cmpli(scratch, Operand(static_cast('9' - '0'))); + __ bgt(¬_array_index); + __ subi(scratch, c2, Operand(static_cast('0'))); + __ cmpli(scratch, Operand(static_cast('9' - '0'))); + __ bgt(¬_array_index); + + // If check failed combine both characters into single halfword. + // This is required by the contract of the method: code at the + // not_found branch expects this combination in c1 register +#if __BYTE_ORDER == __BIG_ENDIAN + __ ShiftLeftImm(c1, c1, Operand(kBitsPerByte)); + __ orx(c1, c1, c2); +#else + __ ShiftLeftImm(r0, c2, Operand(kBitsPerByte)); + __ orx(c1, c1, r0); +#endif + __ b(not_found); + + __ bind(¬_array_index); + // Calculate the two character string hash. + Register hash = scratch1; + StringHelper::GenerateHashInit(masm, hash, c1, scratch); + StringHelper::GenerateHashAddCharacter(masm, hash, c2, scratch); + StringHelper::GenerateHashGetHash(masm, hash, scratch); + + // Collect the two characters in a register. 
+ Register chars = c1; +#if __BYTE_ORDER == __BIG_ENDIAN + __ ShiftLeftImm(c1, c1, Operand(kBitsPerByte)); + __ orx(chars, c1, c2); +#else + __ ShiftLeftImm(r0, c2, Operand(kBitsPerByte)); + __ orx(chars, c1, r0); +#endif + + // chars: two character string, char 1 in byte 0 and char 2 in byte 1. + // hash: hash of two character string. + + // Load symbol table + // Load address of first element of the symbol table. + Register symbol_table = c2; + __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex); + + Register undefined = scratch4; + __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); + + // Calculate capacity mask from the symbol table capacity. + Register mask = scratch2; + __ LoadP(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset)); + __ SmiUntag(mask); + __ subi(mask, mask, Operand(1)); + + // Calculate untagged address of the first element of the symbol table. + Register first_symbol_table_element = symbol_table; + __ addi(first_symbol_table_element, symbol_table, + Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag)); + + // Registers + // chars: two character string, char 1 in byte 0 and char 2 in byte 1. + // hash: hash of two character string + // mask: capacity mask + // first_symbol_table_element: address of the first element of + // the symbol table + // undefined: the undefined object + // scratch: - + + // Perform a number of probes in the symbol table. + const int kProbes = 4; + Label found_in_symbol_table; + Label next_probe[kProbes]; + Register candidate = scratch5; // Scratch register contains candidate. + for (int i = 0; i < kProbes; i++) { + // Calculate entry in symbol table. + if (i > 0) { + __ addi(candidate, hash, Operand(SymbolTable::GetProbeOffset(i))); + } else { + __ mr(candidate, hash); + } + + __ and_(candidate, candidate, mask); + + // Load the entry from the symble table. + STATIC_ASSERT(SymbolTable::kEntrySize == 1); + __ ShiftLeftImm(scratch, candidate, Operand(kPointerSizeLog2)); + __ LoadPX(candidate, MemOperand(scratch, first_symbol_table_element)); + + // If entry is undefined no string with this hash can be found. + Label is_string; + __ CompareObjectType(candidate, scratch, scratch, ODDBALL_TYPE); + __ bne(&is_string); + + __ cmp(undefined, candidate); + __ beq(not_found); + // Must be the hole (deleted entry). + if (FLAG_debug_code) { + __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); + __ cmp(ip, candidate); + __ Assert(eq, "oddball in symbol table is not undefined or the hole"); + } + __ b(&next_probe[i]); + + __ bind(&is_string); + + // Check that the candidate is a non-external ASCII string. The instance + // type is still in the scratch register from the CompareObjectType + // operation. + __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]); + + // If length is not 2 the string is not a candidate. + __ LoadP(scratch, FieldMemOperand(candidate, String::kLengthOffset)); + __ CmpSmiLiteral(scratch, Smi::FromInt(2), r0); + __ bne(&next_probe[i]); + + // Check if the two characters match. + __ lhz(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize)); + __ cmp(chars, scratch); + __ beq(&found_in_symbol_table); + __ bind(&next_probe[i]); + } + + // No matching 2 character string found by probing. + __ b(not_found); + + // Scratch register contains result when we fall through to here. 
+ Register result = candidate; + __ bind(&found_in_symbol_table); + __ mr(r3, result); +} + + +void StringHelper::GenerateHashInit(MacroAssembler* masm, + Register hash, + Register character, + Register scratch) { + // hash = character + (character << 10); + __ LoadRoot(hash, Heap::kHashSeedRootIndex); + // Untag smi seed and add the character. + __ SmiUntag(scratch, hash); + __ add(hash, character, scratch); + // hash += hash << 10; + __ slwi(scratch, hash, Operand(10)); + __ add(hash, hash, scratch); + // hash ^= hash >> 6; + __ srwi(scratch, hash, Operand(6)); + __ xor_(hash, hash, scratch); +} + + +void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, + Register hash, + Register character, + Register scratch) { + // hash += character; + __ add(hash, hash, character); + // hash += hash << 10; + __ slwi(scratch, hash, Operand(10)); + __ add(hash, hash, scratch); + // hash ^= hash >> 6; + __ srwi(scratch, hash, Operand(6)); + __ xor_(hash, hash, scratch); +} + + +void StringHelper::GenerateHashGetHash(MacroAssembler* masm, + Register hash, + Register scratch) { + // hash += hash << 3; + __ slwi(scratch, hash, Operand(3)); + __ add(hash, hash, scratch); + // hash ^= hash >> 11; + __ srwi(scratch, hash, Operand(11)); + __ xor_(hash, hash, scratch); + // hash += hash << 15; + __ slwi(scratch, hash, Operand(15)); + __ add(hash, hash, scratch); + + __ mov(scratch, Operand(String::kHashBitMask)); + __ and_(hash, hash, scratch, SetRC); + + // if (hash == 0) hash = 27; + Label done; + __ bne(&done, cr0); + __ li(hash, Operand(StringHasher::kZeroHash)); + __ bind(&done); +} + + +void SubStringStub::Generate(MacroAssembler* masm) { + Label runtime; + + // Stack frame on entry. + // lr: return address + // sp[0]: to + // sp[4]: from + // sp[8]: string + + // This stub is called from the native-call %_SubString(...), so + // nothing can be assumed about the arguments. It is tested that: + // "string" is a sequential string, + // both "from" and "to" are smis, and + // 0 <= from <= to <= string.length. + // If any of these assumptions fail, we call the runtime system. + + const int kToOffset = 0 * kPointerSize; + const int kFromOffset = 1 * kPointerSize; + const int kStringOffset = 2 * kPointerSize; + + __ LoadP(r5, MemOperand(sp, kToOffset)); + __ LoadP(r6, MemOperand(sp, kFromOffset)); + + // If either to or from had the smi tag bit set, then fail to generic runtime + __ JumpIfNotSmi(r5, &runtime); + __ JumpIfNotSmi(r6, &runtime); + __ SmiUntag(r5); + __ SmiUntag(r6, SetRC); + // Both r5 and r6 are untagged integers. + + // We want to bailout to runtime here if From is negative. + __ blt(&runtime, cr0); // From < 0. + + __ cmpl(r6, r5); + __ bgt(&runtime); // Fail if from > to. + __ sub(r5, r5, r6); + + // Make sure first argument is a string. + __ LoadP(r3, MemOperand(sp, kStringOffset)); + __ JumpIfSmi(r3, &runtime); + Condition is_string = masm->IsObjectStringType(r3, r4); + __ b(NegateCondition(is_string), &runtime, cr0); + + // Short-cut for the case of trivial substring. + Label return_r3; + // r3: original string + // r5: result string length + __ LoadP(r7, FieldMemOperand(r3, String::kLengthOffset)); + __ SmiUntag(r0, r7); + __ cmpl(r5, r0); + // Return original string. + __ beq(&return_r3); + // Longer than original string's length or negative: unsafe arguments. + __ bgt(&runtime); + // Shorter than original string's length: an actual substring. + + // Deal with different string types: update the index if necessary + // and put the underlying string into r8. 
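+ // A cons string contributes its first part (its second part must be the empty
+ // string), a sliced string contributes its parent with the slice offset added
+ // to the start index, and sequential or external strings are used directly.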
+ // r3: original string + // r4: instance type + // r5: length + // r6: from index (untagged) + Label underlying_unpacked, sliced_string, seq_or_external_string; + // If the string is not indirect, it can only be sequential or external. + STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag)); + STATIC_ASSERT(kIsIndirectStringMask != 0); + __ andi(r0, r4, Operand(kIsIndirectStringMask)); + __ beq(&seq_or_external_string, cr0); + + __ andi(r0, r4, Operand(kSlicedNotConsMask)); + __ bne(&sliced_string, cr0); + // Cons string. Check whether it is flat, then fetch first part. + __ LoadP(r8, FieldMemOperand(r3, ConsString::kSecondOffset)); + __ CompareRoot(r8, Heap::kEmptyStringRootIndex); + __ bne(&runtime); + __ LoadP(r8, FieldMemOperand(r3, ConsString::kFirstOffset)); + // Update instance type. + __ LoadP(r4, FieldMemOperand(r8, HeapObject::kMapOffset)); + __ lbz(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); + __ b(&underlying_unpacked); + + __ bind(&sliced_string); + // Sliced string. Fetch parent and correct start index by offset. + __ LoadP(r8, FieldMemOperand(r3, SlicedString::kParentOffset)); + __ LoadP(r7, FieldMemOperand(r3, SlicedString::kOffsetOffset)); + __ SmiUntag(r4, r7); + __ add(r6, r6, r4); // Add offset to index. + // Update instance type. + __ LoadP(r4, FieldMemOperand(r8, HeapObject::kMapOffset)); + __ lbz(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); + __ b(&underlying_unpacked); + + __ bind(&seq_or_external_string); + // Sequential or external string. Just move string to the expected register. + __ mr(r8, r3); + + __ bind(&underlying_unpacked); + + if (FLAG_string_slices) { + Label copy_routine; + // r8: underlying subject string + // r4: instance type of underlying subject string + // r5: length + // r6: adjusted start index (untagged) + __ cmpi(r5, Operand(SlicedString::kMinLength)); + // Short slice. Copy instead of slicing. + __ blt(©_routine); + // Allocate new sliced string. At this point we do not reload the instance + // type including the string encoding because we simply rely on the info + // provided by the original string. It does not matter if the original + // string's encoding is wrong because we always have to recheck encoding of + // the newly created string's parent anyways due to externalized strings. + Label two_byte_slice, set_slice_header; + STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0); + STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); + __ andi(r0, r4, Operand(kStringEncodingMask)); + __ beq(&two_byte_slice, cr0); + __ AllocateAsciiSlicedString(r3, r5, r9, r10, &runtime); + __ b(&set_slice_header); + __ bind(&two_byte_slice); + __ AllocateTwoByteSlicedString(r3, r5, r9, r10, &runtime); + __ bind(&set_slice_header); + __ SmiTag(r6); + __ StoreP(r8, FieldMemOperand(r3, SlicedString::kParentOffset), r0); + __ StoreP(r6, FieldMemOperand(r3, SlicedString::kOffsetOffset), r0); + __ b(&return_r3); + + __ bind(©_routine); + } + + // r8: underlying subject string + // r4: instance type of underlying subject string + // r5: length + // r6: adjusted start index (untagged) + Label two_byte_sequential, sequential_string, allocate_result; + STATIC_ASSERT(kExternalStringTag != 0); + STATIC_ASSERT(kSeqStringTag == 0); + __ andi(r0, r4, Operand(kExternalStringTag)); + __ beq(&sequential_string, cr0); + + // Handle external string. + // Rule out short external strings. 
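+ // A short external string does not cache its resource data pointer, so it
+ // cannot be handled here and is passed to the runtime.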
+ STATIC_CHECK(kShortExternalStringTag != 0); + __ andi(r0, r4, Operand(kShortExternalStringTag)); + __ bne(&runtime, cr0); + __ LoadP(r8, FieldMemOperand(r8, ExternalString::kResourceDataOffset)); + // r8 already points to the first character of underlying string. + __ b(&allocate_result); + + __ bind(&sequential_string); + // Locate first character of underlying subject string. + STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize); + __ addi(r8, r8, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + + __ bind(&allocate_result); + // Sequential acii string. Allocate the result. + STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0); + __ andi(r0, r4, Operand(kStringEncodingMask)); + __ beq(&two_byte_sequential, cr0); + + // Allocate and copy the resulting ASCII string. + __ AllocateAsciiString(r3, r5, r7, r9, r10, &runtime); + + // Locate first character of substring to copy. + __ add(r8, r8, r6); + // Locate first character of result. + __ addi(r4, r3, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + + // r3: result string + // r4: first character of result string + // r5: result string length + // r8: first character of substring to copy + STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0); + StringHelper::GenerateCopyCharactersLong(masm, r4, r8, r5, r6, r7, r9, + r10, r22, COPY_ASCII | DEST_ALWAYS_ALIGNED); + __ b(&return_r3); + + // Allocate and copy the resulting two-byte string. + __ bind(&two_byte_sequential); + __ AllocateTwoByteString(r3, r5, r7, r9, r10, &runtime); + + // Locate first character of substring to copy. + __ ShiftLeftImm(r4, r6, Operand(1)); + __ add(r8, r8, r4); + // Locate first character of result. + __ addi(r4, r3, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + + // r3: result string. + // r4: first character of result. + // r5: result length. + // r8: first character of substring to copy. + STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); + StringHelper::GenerateCopyCharactersLong( + masm, r4, r8, r5, r6, r7, r9, r10, r22, DEST_ALWAYS_ALIGNED); + + __ bind(&return_r3); + Counters* counters = masm->isolate()->counters(); + __ IncrementCounter(counters->sub_string_native(), 1, r6, r7); + __ addi(sp, sp, Operand(3 * kPointerSize)); + __ Ret(); + + // Just jump to runtime to create the sub string. + __ bind(&runtime); + __ TailCallRuntime(Runtime::kSubString, 3, 1); +} + + +void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm, + Register left, + Register right, + Register scratch1, + Register scratch2) { + Register length = scratch1; + + // Compare lengths. + Label strings_not_equal, check_zero_length; + __ LoadP(length, FieldMemOperand(left, String::kLengthOffset)); + __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset)); + __ cmp(length, scratch2); + __ beq(&check_zero_length); + __ bind(&strings_not_equal); + __ LoadSmiLiteral(r3, Smi::FromInt(NOT_EQUAL)); + __ Ret(); + + // Check if the length is zero. + Label compare_chars; + __ bind(&check_zero_length); + STATIC_ASSERT(kSmiTag == 0); + __ cmpi(length, Operand::Zero()); + __ bne(&compare_chars); + __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL)); + __ Ret(); + + // Compare characters. + __ bind(&compare_chars); + GenerateAsciiCharsCompareLoop(masm, + left, right, length, scratch2, + &strings_not_equal); + + // Characters are equal. 
+ __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL)); + __ Ret(); +} + + +void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, + Register left, + Register right, + Register scratch1, + Register scratch2, + Register scratch3) { + Label skip, result_not_equal, compare_lengths; + // Find minimum length and length difference. + __ LoadP(scratch1, FieldMemOperand(left, String::kLengthOffset)); + __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset)); + __ sub(scratch3, scratch1, scratch2, LeaveOE, SetRC); + Register length_delta = scratch3; + __ ble(&skip, cr0); + __ mr(scratch1, scratch2); + __ bind(&skip); + Register min_length = scratch1; + STATIC_ASSERT(kSmiTag == 0); + __ cmpi(min_length, Operand::Zero()); + __ beq(&compare_lengths); + + // Compare loop. + GenerateAsciiCharsCompareLoop(masm, + left, right, min_length, scratch2, + &result_not_equal); + + // Compare lengths - strings up to min-length are equal. + __ bind(&compare_lengths); + ASSERT(Smi::FromInt(EQUAL) == static_cast(0)); + // Use length_delta as result if it's zero. + __ mr(r3, length_delta); + __ cmpi(r3, Operand::Zero()); + __ bind(&result_not_equal); + // Conditionally update the result based either on length_delta or + // the last comparion performed in the loop above. + Label less_equal, equal; + __ ble(&less_equal); + __ LoadSmiLiteral(r3, Smi::FromInt(GREATER)); + __ Ret(); + __ bind(&less_equal); + __ beq(&equal); + __ LoadSmiLiteral(r3, Smi::FromInt(LESS)); + __ bind(&equal); + __ Ret(); +} + + +void StringCompareStub::GenerateAsciiCharsCompareLoop( + MacroAssembler* masm, + Register left, + Register right, + Register length, + Register scratch1, + Label* chars_not_equal) { + // Change index to run from -length to -1 by adding length to string + // start. This means that loop ends when index reaches zero, which + // doesn't need an additional compare. + __ SmiUntag(length); + __ addi(scratch1, length, + Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(left, left, scratch1); + __ add(right, right, scratch1); + __ subfic(length, length, Operand::Zero()); + Register index = length; // index = -length; + + // Compare loop. + Label loop; + __ bind(&loop); + __ lbzx(scratch1, MemOperand(left, index)); + __ lbzx(r0, MemOperand(right, index)); + __ cmp(scratch1, r0); + __ bne(chars_not_equal); + __ addi(index, index, Operand(1)); + __ cmpi(index, Operand::Zero()); + __ bne(&loop); +} + + +void StringCompareStub::Generate(MacroAssembler* masm) { + Label runtime; + + Counters* counters = masm->isolate()->counters(); + + // Stack frame on entry. + // sp[0]: right string + // sp[4]: left string + __ LoadP(r3, MemOperand(sp)); // Load right in r3, left in r4. + __ LoadP(r4, MemOperand(sp, kPointerSize)); + + Label not_same; + __ cmp(r3, r4); + __ bne(¬_same); + STATIC_ASSERT(EQUAL == 0); + STATIC_ASSERT(kSmiTag == 0); + __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL)); + __ IncrementCounter(counters->string_compare_native(), 1, r4, r5); + __ addi(sp, sp, Operand(2 * kPointerSize)); + __ Ret(); + + __ bind(¬_same); + + // Check that both objects are sequential ASCII strings. + __ JumpIfNotBothSequentialAsciiStrings(r4, r3, r5, r6, &runtime); + + // Compare flat ASCII strings natively. Remove arguments from stack first. 
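+ // (GenerateCompareFlatAsciiStrings emits code that ends in Ret() on every
+ // path, so the runtime label below is only reached through the earlier branch
+ // for inputs that are not both sequential ASCII strings.)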
+ __ IncrementCounter(counters->string_compare_native(), 1, r5, r6); + __ addi(sp, sp, Operand(2 * kPointerSize)); + GenerateCompareFlatAsciiStrings(masm, r4, r3, r5, r6, r7); + + // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) + // tagged as a small integer. + __ bind(&runtime); + __ TailCallRuntime(Runtime::kStringCompare, 2, 1); +} + + +void StringAddStub::Generate(MacroAssembler* masm) { + Label call_runtime, call_builtin; + Builtins::JavaScript builtin_id = Builtins::ADD; + + Counters* counters = masm->isolate()->counters(); + + // Stack on entry: + // sp[0]: second argument (right). + // sp[4]: first argument (left). + + // Load the two arguments. + __ LoadP(r3, MemOperand(sp, 1 * kPointerSize)); // First argument. + __ LoadP(r4, MemOperand(sp, 0 * kPointerSize)); // Second argument. + + // Make sure that both arguments are strings if not known in advance. + if (flags_ == NO_STRING_ADD_FLAGS) { + __ JumpIfEitherSmi(r3, r4, &call_runtime); + // Load instance types. + __ LoadP(r7, FieldMemOperand(r3, HeapObject::kMapOffset)); + __ LoadP(r8, FieldMemOperand(r4, HeapObject::kMapOffset)); + __ lbz(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset)); + __ lbz(r8, FieldMemOperand(r8, Map::kInstanceTypeOffset)); + STATIC_ASSERT(kStringTag == 0); + // If either is not a string, go to runtime. + __ andi(r0, r7, Operand(kIsNotStringMask)); + __ bne(&call_runtime, cr0); + __ andi(r0, r8, Operand(kIsNotStringMask)); + __ bne(&call_runtime, cr0); + } else { + // Here at least one of the arguments is definitely a string. + // We convert the one that is not known to be a string. + if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) { + ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0); + GenerateConvertArgument( + masm, 1 * kPointerSize, r3, r5, r6, r7, r8, &call_builtin); + builtin_id = Builtins::STRING_ADD_RIGHT; + } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) { + ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0); + GenerateConvertArgument( + masm, 0 * kPointerSize, r4, r5, r6, r7, r8, &call_builtin); + builtin_id = Builtins::STRING_ADD_LEFT; + } + } + + // Both arguments are strings. + // r3: first string + // r4: second string + // r7: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) + // r8: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) + { + Label first_not_empty, return_second, strings_not_empty; + // Check if either of the strings are empty. In that case return the other. + __ LoadP(r5, FieldMemOperand(r3, String::kLengthOffset)); + __ LoadP(r6, FieldMemOperand(r4, String::kLengthOffset)); + STATIC_ASSERT(kSmiTag == 0); + // Test if first string is empty. + __ CmpSmiLiteral(r5, Smi::FromInt(0), r0); + __ bne(&first_not_empty); + __ mr(r3, r4); // If first is empty, return second. + __ b(&return_second); + STATIC_ASSERT(kSmiTag == 0); + __ bind(&first_not_empty); + // Else test if second string is empty. + __ CmpSmiLiteral(r6, Smi::FromInt(0), r0); + __ bne(&strings_not_empty); // If either string was empty, return r3. + + __ bind(&return_second); + __ IncrementCounter(counters->string_add_native(), 1, r5, r6); + __ addi(sp, sp, Operand(2 * kPointerSize)); + __ Ret(); + + __ bind(&strings_not_empty); + } + + __ SmiUntag(r5); + __ SmiUntag(r6); + // Both strings are non-empty. 
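+ // From here the result is produced in one of three ways: a symbol table lookup
+ // when the combined length is 2, a cons string when it is at least
+ // ConsString::kMinLength, or a freshly allocated flat string that both halves
+ // are copied into; lengths above String::kMaxLength go to the runtime.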
+ // r3: first string + // r4: second string + // r5: length of first string + // r6: length of second string + // r7: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) + // r8: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) + // Look at the length of the result of adding the two strings. + Label string_add_flat_result, longer_than_two; + // Adding two lengths can't overflow. + STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2); + __ add(r9, r5, r6); + // Use the symbol table when adding two one character strings, as it + // helps later optimizations to return a symbol here. + __ cmpi(r9, Operand(2)); + __ bne(&longer_than_two); + + // Check that both strings are non-external ASCII strings. + if (flags_ != NO_STRING_ADD_FLAGS) { + __ LoadP(r7, FieldMemOperand(r3, HeapObject::kMapOffset)); + __ LoadP(r8, FieldMemOperand(r4, HeapObject::kMapOffset)); + __ lbz(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset)); + __ lbz(r8, FieldMemOperand(r8, Map::kInstanceTypeOffset)); + } + __ JumpIfBothInstanceTypesAreNotSequentialAscii(r7, r8, r9, r10, + &call_runtime); + + // Get the two characters forming the sub string. + __ lbz(r5, FieldMemOperand(r3, SeqAsciiString::kHeaderSize)); + __ lbz(r6, FieldMemOperand(r4, SeqAsciiString::kHeaderSize)); + + // Try to lookup two character string in symbol table. If it is not found + // just allocate a new one. + Label make_two_character_string; + StringHelper::GenerateTwoCharacterSymbolTableProbe( + masm, r5, r6, r9, r10, r7, r8, r22, &make_two_character_string); + __ IncrementCounter(counters->string_add_native(), 1, r5, r6); + __ addi(sp, sp, Operand(2 * kPointerSize)); + __ Ret(); + + __ bind(&make_two_character_string); + // Resulting string has length 2 and first chars of two strings + // are combined into single halfword in r5 register. + // So we can fill resulting string without two loops by a single + // halfword store instruction + __ li(r9, Operand(2)); + __ AllocateAsciiString(r3, r9, r7, r8, r22, &call_runtime); + __ sth(r5, FieldMemOperand(r3, SeqAsciiString::kHeaderSize)); + __ IncrementCounter(counters->string_add_native(), 1, r5, r6); + __ addi(sp, sp, Operand(2 * kPointerSize)); + __ Ret(); + + __ bind(&longer_than_two); + // Check if resulting string will be flat. + __ cmpi(r9, Operand(ConsString::kMinLength)); + __ blt(&string_add_flat_result); + // Handle exceptionally long strings in the runtime system. + STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0); + ASSERT(IsPowerOf2(String::kMaxLength + 1)); + // kMaxLength + 1 is representable as shifted literal, kMaxLength is not. + __ mov(r10, Operand(String::kMaxLength + 1)); + __ cmpl(r9, r10); + __ bge(&call_runtime); + + // If result is not supposed to be flat, allocate a cons string object. + // If both strings are ASCII the result is an ASCII cons string. + if (flags_ != NO_STRING_ADD_FLAGS) { + __ LoadP(r7, FieldMemOperand(r3, HeapObject::kMapOffset)); + __ LoadP(r8, FieldMemOperand(r4, HeapObject::kMapOffset)); + __ lbz(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset)); + __ lbz(r8, FieldMemOperand(r8, Map::kInstanceTypeOffset)); + } + Label non_ascii, allocated, ascii_data; + STATIC_ASSERT(kTwoByteStringTag == 0); + __ andi(r0, r7, Operand(kStringEncodingMask)); + __ beq(&non_ascii, cr0); + __ andi(r0, r8, Operand(kStringEncodingMask)); + __ beq(&non_ascii, cr0); + + // Allocate an ASCII cons string. 
+ __ bind(&ascii_data); + __ AllocateAsciiConsString(r10, r9, r7, r8, &call_runtime); + __ bind(&allocated); + // Fill the fields of the cons string. + __ StoreP(r3, FieldMemOperand(r10, ConsString::kFirstOffset), r0); + __ StoreP(r4, FieldMemOperand(r10, ConsString::kSecondOffset), r0); + __ mr(r3, r10); + __ IncrementCounter(counters->string_add_native(), 1, r5, r6); + __ addi(sp, sp, Operand(2 * kPointerSize)); + __ Ret(); + + __ bind(&non_ascii); + // At least one of the strings is two-byte. Check whether it happens + // to contain only ASCII characters. + // r7: first instance type. + // r8: second instance type. + __ andi(r0, r7, Operand(kAsciiDataHintMask)); + __ bne(&ascii_data, cr0); + __ andi(r0, r8, Operand(kAsciiDataHintMask)); + __ bne(&ascii_data, cr0); + __ xor_(r7, r7, r8); + STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); + __ andi(r7, r7, Operand(kAsciiStringTag | kAsciiDataHintTag)); + __ cmpi(r7, Operand(kAsciiStringTag | kAsciiDataHintTag)); + __ beq(&ascii_data); + + // Allocate a two byte cons string. + __ AllocateTwoByteConsString(r10, r9, r7, r8, &call_runtime); + __ b(&allocated); + + // We cannot encounter sliced strings or cons strings here since: + STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength); + // Handle creating a flat result from either external or sequential strings. + // Locate the first characters' locations. + // r3: first string + // r4: second string + // r5: length of first string + // r6: length of second string + // r7: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) + // r8: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) + // r9: sum of lengths. + Label first_prepared, second_prepared, external_string1, external_string2; + __ bind(&string_add_flat_result); + if (flags_ != NO_STRING_ADD_FLAGS) { + __ LoadP(r7, FieldMemOperand(r3, HeapObject::kMapOffset)); + __ LoadP(r8, FieldMemOperand(r4, HeapObject::kMapOffset)); + __ lbz(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset)); + __ lbz(r8, FieldMemOperand(r8, Map::kInstanceTypeOffset)); + } + + // Check whether both strings have same encoding + __ xor_(r10, r7, r8); + __ andi(r0, r10, Operand(kStringEncodingMask)); + __ bne(&call_runtime, cr0); + + STATIC_ASSERT(kSeqStringTag == 0); + __ andi(r0, r7, Operand(kStringRepresentationMask)); + __ bne(&external_string1, cr0); + STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); + __ addi(r10, r3, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ b(&first_prepared); + // External string: rule out short external string and load string resource. + STATIC_ASSERT(kShortExternalStringTag != 0); + __ bind(&external_string1); + __ andi(r0, r7, Operand(kShortExternalStringMask)); + __ bne(&call_runtime, cr0); + __ LoadP(r10, FieldMemOperand(r3, ExternalString::kResourceDataOffset)); + __ bind(&first_prepared); + + STATIC_ASSERT(kSeqStringTag == 0); + __ andi(r0, r8, Operand(kStringRepresentationMask)); + __ bne(&external_string2, cr0); + STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); + __ addi(r4, r4, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ b(&second_prepared); + // External string: rule out short external string and load string resource. 
+ STATIC_ASSERT(kShortExternalStringTag != 0); + __ bind(&external_string2); + __ andi(r0, r8, Operand(kShortExternalStringMask)); + __ bne(&call_runtime, cr0); + __ LoadP(r4, FieldMemOperand(r4, ExternalString::kResourceDataOffset)); + __ bind(&second_prepared); + + Label non_ascii_string_add_flat_result; + // r10: first character of first string + // r4: first character of second string + // r5: length of first string. + // r6: length of second string. + // r9: sum of lengths. + // Both strings have the same encoding. + STATIC_ASSERT(kTwoByteStringTag == 0); + __ andi(r0, r8, Operand(kStringEncodingMask)); + __ beq(&non_ascii_string_add_flat_result, cr0); + + __ AllocateAsciiString(r3, r9, r7, r8, r22, &call_runtime); + __ addi(r9, r3, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + // r3: result string. + // r10: first character of first string. + // r4: first character of second string. + // r5: length of first string. + // r6: length of second string. + // r9: first character of result. + StringHelper::GenerateCopyCharacters(masm, r9, r10, r5, r7, true); + // r9: next character of result. + StringHelper::GenerateCopyCharacters(masm, r9, r4, r6, r7, true); + __ IncrementCounter(counters->string_add_native(), 1, r5, r6); + __ addi(sp, sp, Operand(2 * kPointerSize)); + __ Ret(); + + __ bind(&non_ascii_string_add_flat_result); + __ AllocateTwoByteString(r3, r9, r7, r8, r22, &call_runtime); + __ addi(r9, r3, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + // r3: result string. + // r10: first character of first string. + // r4: first character of second string. + // r5: length of first string. + // r6: length of second string. + // r9: first character of result. + StringHelper::GenerateCopyCharacters(masm, r9, r10, r5, r7, false); + // r9: next character of result. + StringHelper::GenerateCopyCharacters(masm, r9, r4, r6, r7, false); + __ IncrementCounter(counters->string_add_native(), 1, r5, r6); + __ addi(sp, sp, Operand(2 * kPointerSize)); + __ Ret(); + + // Just jump to runtime to add the two strings. + __ bind(&call_runtime); + __ TailCallRuntime(Runtime::kStringAdd, 2, 1); + + if (call_builtin.is_linked()) { + __ bind(&call_builtin); + __ InvokeBuiltin(builtin_id, JUMP_FUNCTION); + } +} + + +void StringAddStub::GenerateConvertArgument(MacroAssembler* masm, + int stack_offset, + Register arg, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Label* slow) { + // First check if the argument is already a string. + Label not_string, done; + __ JumpIfSmi(arg, ¬_string); + __ CompareObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE); + __ blt(&done); + + // Check the number to string cache. + Label not_cached; + __ bind(¬_string); + // Puts the cached result into scratch1. + NumberToStringStub::GenerateLookupNumberStringCache(masm, + arg, + scratch1, + scratch2, + scratch3, + scratch4, + false, + ¬_cached); + __ mr(arg, scratch1); + __ StoreP(arg, MemOperand(sp, stack_offset)); + __ b(&done); + + // Check if the argument is a safe string wrapper. + __ bind(¬_cached); + __ JumpIfSmi(arg, slow); + __ CompareObjectType( + arg, scratch1, scratch2, JS_VALUE_TYPE); // map -> scratch1. 
+ __ bne(slow); + __ lbz(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset)); + __ andi(scratch2, + scratch2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf)); + __ cmpi(scratch2, + Operand(1 << Map::kStringWrapperSafeForDefaultValueOf)); + __ bne(slow); + __ LoadP(arg, FieldMemOperand(arg, JSValue::kValueOffset)); + __ StoreP(arg, MemOperand(sp, stack_offset)); + + __ bind(&done); +} + + +void ICCompareStub::GenerateSmis(MacroAssembler* masm) { + ASSERT(state_ == CompareIC::SMIS); + Label miss; + __ orx(r5, r4, r3); + __ JumpIfNotSmi(r5, &miss); + + if (GetCondition() == eq) { + // For equality we do not care about the sign of the result. + // __ sub(r3, r3, r4, SetCC); + __ sub(r3, r3, r4); + } else { + // Untag before subtracting to avoid handling overflow. + __ SmiUntag(r4); + __ SmiUntag(r3); + __ sub(r3, r4, r3); + } + __ Ret(); + + __ bind(&miss); + GenerateMiss(masm); +} + + +void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { + ASSERT(state_ == CompareIC::HEAP_NUMBERS); + Label generic_stub; + Label unordered, maybe_undefined1, maybe_undefined2; + Label miss; + Label equal, less_than; + + __ and_(r5, r4, r3); + __ JumpIfSmi(r5, &generic_stub); + + __ CompareObjectType(r3, r5, r5, HEAP_NUMBER_TYPE); + __ bne(&maybe_undefined1); + __ CompareObjectType(r4, r5, r5, HEAP_NUMBER_TYPE); + __ bne(&maybe_undefined2); + + // Inlining the double comparison and falling back to the general compare + // stub if NaN is involved + + // Load left and right operand + // likely we can combine the constants to remove the sub + __ lfd(d0, FieldMemOperand(r4, HeapNumber::kValueOffset)); + __ lfd(d1, FieldMemOperand(r3, HeapNumber::kValueOffset)); + + // Compare operands + __ fcmpu(d0, d1); + + // Don't base result on status bits when a NaN is involved. + __ bunordered(&unordered); + + // Return a result of -1, 0, or 1, based on status bits. + __ beq(&equal); + __ blt(&less_than); + // assume greater than + __ li(r3, Operand(GREATER)); + __ Ret(); + __ bind(&equal); + __ li(r3, Operand(EQUAL)); + __ Ret(); + __ bind(&less_than); + __ li(r3, Operand(LESS)); + __ Ret(); + + __ bind(&unordered); + + CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r4, r3); + __ bind(&generic_stub); + __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); + + __ bind(&maybe_undefined1); + if (Token::IsOrderedRelationalCompareOp(op_)) { + __ CompareRoot(r3, Heap::kUndefinedValueRootIndex); + __ bne(&miss); + __ CompareObjectType(r4, r5, r5, HEAP_NUMBER_TYPE); + __ bne(&maybe_undefined2); + __ b(&unordered); + } + + __ bind(&maybe_undefined2); + if (Token::IsOrderedRelationalCompareOp(op_)) { + __ CompareRoot(r4, Heap::kUndefinedValueRootIndex); + __ beq(&unordered); + } + + __ bind(&miss); + GenerateMiss(masm); +} + + +void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { + ASSERT(state_ == CompareIC::SYMBOLS); + Label miss, not_equal; + + // Registers containing left and right operands respectively. + Register left = r4; + Register right = r3; + Register tmp1 = r5; + Register tmp2 = r6; + + // Check that both operands are heap objects. + __ JumpIfEitherSmi(left, right, &miss); + + // Check that both operands are symbols. 
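// These symbol and string compare stubs lean on interning: a symbol is
// unique per character sequence, so two symbols are equal exactly when they
// are the same object, and only non-symbol strings fall through to a
// character-wise comparison or the runtime. A sketch of that shortcut with
// hypothetical types, purely as an illustration:
#include <cstring>

struct Str {
  const char* chars;
  bool is_symbol;                  // interned (unique) string?
};

static bool StringsEqual(const Str* a, const Str* b) {
  if (a == b) return true;                          // identical objects
  if (a->is_symbol && b->is_symbol) return false;   // distinct symbols differ
  return std::strcmp(a->chars, b->chars) == 0;      // slow path: compare chars
}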
+ __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); + __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); + __ lbz(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); + __ lbz(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); + STATIC_ASSERT(kSymbolTag != 0); + __ and_(tmp1, tmp1, tmp2); + __ andi(r0, tmp1, Operand(kIsSymbolMask)); + __ beq(&miss, cr0); + + // Symbols are compared by identity. + __ cmp(left, right); + __ bne(¬_equal); + // Make sure r3 is non-zero. At this point input operands are + // guaranteed to be non-zero. + ASSERT(right.is(r3)); + STATIC_ASSERT(EQUAL == 0); + STATIC_ASSERT(kSmiTag == 0); + __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL)); + __ bind(¬_equal); + __ Ret(); + + __ bind(&miss); + GenerateMiss(masm); +} + + +void ICCompareStub::GenerateStrings(MacroAssembler* masm) { + ASSERT(state_ == CompareIC::STRINGS); + Label miss, not_identical, is_symbol; + + bool equality = Token::IsEqualityOp(op_); + + // Registers containing left and right operands respectively. + Register left = r4; + Register right = r3; + Register tmp1 = r5; + Register tmp2 = r6; + Register tmp3 = r7; + Register tmp4 = r8; + + // Check that both operands are heap objects. + __ JumpIfEitherSmi(left, right, &miss); + + // Check that both operands are strings. This leaves the instance + // types loaded in tmp1 and tmp2. + __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); + __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); + __ lbz(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); + __ lbz(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); + STATIC_ASSERT(kNotStringTag != 0); + __ orx(tmp3, tmp1, tmp2); + __ andi(r0, tmp3, Operand(kIsNotStringMask)); + __ bne(&miss, cr0); + + // Fast check for identical strings. + __ cmp(left, right); + STATIC_ASSERT(EQUAL == 0); + STATIC_ASSERT(kSmiTag == 0); + __ bne(¬_identical); + __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL)); + __ Ret(); + __ bind(¬_identical); + + // Handle not identical strings. + + // Check that both strings are symbols. If they are, we're done + // because we already know they are not identical. + if (equality) { + ASSERT(GetCondition() == eq); + STATIC_ASSERT(kSymbolTag != 0); + __ and_(tmp3, tmp1, tmp2); + __ andi(r0, tmp3, Operand(kIsSymbolMask)); + __ beq(&is_symbol, cr0); + // Make sure r3 is non-zero. At this point input operands are + // guaranteed to be non-zero. + ASSERT(right.is(r3)); + __ Ret(); + __ bind(&is_symbol); + } + + // Check that both strings are sequential ASCII. + Label runtime; + __ JumpIfBothInstanceTypesAreNotSequentialAscii( + tmp1, tmp2, tmp3, tmp4, &runtime); + + // Compare flat ASCII strings. Returns when done. + if (equality) { + StringCompareStub::GenerateFlatAsciiStringEquals( + masm, left, right, tmp1, tmp2); + } else { + StringCompareStub::GenerateCompareFlatAsciiStrings( + masm, left, right, tmp1, tmp2, tmp3); + } + + // Handle more complex cases in runtime. 
+ __ bind(&runtime); + __ Push(left, right); + if (equality) { + __ TailCallRuntime(Runtime::kStringEquals, 2, 1); + } else { + __ TailCallRuntime(Runtime::kStringCompare, 2, 1); + } + + __ bind(&miss); + GenerateMiss(masm); +} + + +void ICCompareStub::GenerateObjects(MacroAssembler* masm) { + ASSERT(state_ == CompareIC::OBJECTS); + Label miss; + __ and_(r5, r4, r3); + __ JumpIfSmi(r5, &miss); + + __ CompareObjectType(r3, r5, r5, JS_OBJECT_TYPE); + __ bne(&miss); + __ CompareObjectType(r4, r5, r5, JS_OBJECT_TYPE); + __ bne(&miss); + + ASSERT(GetCondition() == eq); + __ sub(r3, r3, r4); + __ Ret(); + + __ bind(&miss); + GenerateMiss(masm); +} + + +void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) { + Label miss; + __ and_(r5, r4, r3); + __ JumpIfSmi(r5, &miss); + __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset)); + __ LoadP(r6, FieldMemOperand(r4, HeapObject::kMapOffset)); + __ Cmpi(r5, Operand(known_map_), r0); + __ bne(&miss); + __ Cmpi(r6, Operand(known_map_), r0); + __ bne(&miss); + + __ sub(r3, r3, r4); + __ Ret(); + + __ bind(&miss); + GenerateMiss(masm); +} + + + +void ICCompareStub::GenerateMiss(MacroAssembler* masm) { + { + // Call the runtime system in a fresh internal frame. + ExternalReference miss = + ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate()); + + FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(r4, r3); + __ mflr(r0); + __ push(r0); + __ Push(r4, r3); + __ LoadSmiLiteral(ip, Smi::FromInt(op_)); + __ push(ip); + __ CallExternalReference(miss, 3); + // Compute the entry point of the rewritten stub. + __ addi(r5, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); + // Restore registers. + __ pop(r0); + __ mtlr(r0); + __ pop(r3); + __ pop(r4); + } + + __ Jump(r5); +} + +// This stub is paired with DirectCEntryStub::GenerateCall +void DirectCEntryStub::Generate(MacroAssembler* masm) { + // Retrieve return address + __ LoadP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize)); + __ Jump(r0); +} + + +void DirectCEntryStub::GenerateCall(MacroAssembler* masm, + ExternalReference function) { + __ mov(r6, Operand(function)); + GenerateCall(masm, r6); +} + + +void DirectCEntryStub::GenerateCall(MacroAssembler* masm, + Register target) { + Register scratch = r11; +#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR) + Register dest = ip; + // Native AIX/PPC64 Linux use a function descriptor. + __ LoadP(ToRegister(2), MemOperand(target, kPointerSize)); // TOC + __ LoadP(ip, MemOperand(target, 0)); // Instruction address +#elif ABI_TOC_ADDRESSABILITY_VIA_IP + Register dest = ip; + __ Move(ip, target); +#else + Register dest = target; +#endif + + __ mov(r0, Operand(reinterpret_cast(GetCode().location()), + RelocInfo::CODE_TARGET)); + + // Block the trampoline pool through the whole function to make sure the + // number of generated instructions is constant. + Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm); + + // Push return address (accessible to GC through exit frame pc). + Label start, here; + __ bind(&start); + __ b(&here, SetLK); + __ bind(&here); + __ mflr(scratch); + __ mtlr(r0); // from above, so we know where to return + __ addi(scratch, scratch, Operand(6 * Assembler::kInstrSize)); + __ StoreP(scratch, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize)); + __ Jump(dest); // Call the C++ function. 
+ ASSERT_EQ(Assembler::kInstrSize + + (6 * Assembler::kInstrSize), + masm->SizeOfCodeGeneratedSince(&start)); +} + + +void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, + Label* miss, + Label* done, + Register receiver, + Register properties, + Handle name, + Register scratch0) { + // If names of slots in range from 1 to kProbes - 1 for the hash value are + // not equal to the name and kProbes-th slot is not used (its name is the + // undefined value), it guarantees the hash table doesn't contain the + // property. It's true even if some slots represent deleted properties + // (their names are the hole value). + for (int i = 0; i < kInlinedProbes; i++) { + // scratch0 points to properties hash. + // Compute the masked index: (hash + i + i * i) & mask. + Register index = scratch0; + // Capacity is smi 2^n. + __ LoadP(index, FieldMemOperand(properties, kCapacityOffset)); + __ subi(index, index, Operand(1)); + __ LoadSmiLiteral(ip, Smi::FromInt(name->Hash() + + StringDictionary::GetProbeOffset(i))); + __ and_(index, index, ip); + + // Scale the index by multiplying by the entry size. + ASSERT(StringDictionary::kEntrySize == 3); + __ ShiftLeftImm(ip, index, Operand(1)); + __ add(index, index, ip); // index *= 3. + + Register entity_name = scratch0; + // Having undefined at this place means the name is not contained. + Register tmp = properties; + __ SmiToPtrArrayOffset(ip, index); + __ add(tmp, properties, ip); + __ LoadP(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); + + ASSERT(!tmp.is(entity_name)); + __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex); + __ cmp(entity_name, tmp); + __ beq(done); + + if (i != kInlinedProbes - 1) { + // Load the hole ready for use below: + __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex); + + // Stop if found the property. + __ Cmpi(entity_name, Operand(Handle(name)), r0); + __ beq(miss); + + Label the_hole; + __ cmp(entity_name, tmp); + __ beq(&the_hole); + + // Check if the entry name is not a symbol. + __ LoadP(entity_name, FieldMemOperand(entity_name, + HeapObject::kMapOffset)); + __ lbz(entity_name, + FieldMemOperand(entity_name, Map::kInstanceTypeOffset)); + __ andi(r0, entity_name, Operand(kIsSymbolMask)); + __ beq(miss, cr0); + + __ bind(&the_hole); + + // Restore the properties. + __ LoadP(properties, + FieldMemOperand(receiver, JSObject::kPropertiesOffset)); + } + } + + const int spill_mask = + (r0.bit() | r9.bit() | r8.bit() | r7.bit() | r6.bit() | + r5.bit() | r4.bit() | r3.bit()); + + __ mflr(r0); + __ MultiPush(spill_mask); + + __ LoadP(r3, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); + __ mov(r4, Operand(Handle(name))); + StringDictionaryLookupStub stub(NEGATIVE_LOOKUP); + __ CallStub(&stub); + __ cmpi(r3, Operand::Zero()); + + __ MultiPop(spill_mask); // MultiPop does not touch condition flags + __ mtlr(r0); + + __ beq(done); + __ bne(miss); +} + + +// Probe the string dictionary in the |elements| register. Jump to the +// |done| label if a property with the given name is found. Jump to +// the |miss| label otherwise. +// If lookup was successful |scratch2| will be equal to elements + 4 * index. +void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, + Label* miss, + Label* done, + Register elements, + Register name, + Register scratch1, + Register scratch2) { + ASSERT(!elements.is(scratch1)); + ASSERT(!elements.is(scratch2)); + ASSERT(!name.is(scratch1)); + ASSERT(!name.is(scratch2)); + + // Assert that name contains a string. 
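// The dictionary lookup helpers here use the open-addressing scheme the
// comments describe: capacity is a power of two, probe i inspects entry
// (hash + i + i * i) & mask, and every entry spans kEntrySize == 3 slots
// (key, value, details). A host-side sketch of that probe sequence with
// hypothetical types (illustration only; the real tables also track deleted
// holes separately from empty slots):
#include <cstddef>
#include <vector>

static const int kEntrySizeSketch = 3;   // key, value, property details

// |slots| holds capacity * kEntrySizeSketch elements. Returns the slot index
// of the matching key, or -1 once an empty (NULL) key slot is reached.
static int Probe(const std::vector<const void*>& slots, const void* key,
                 unsigned hash, int max_probes) {
  unsigned capacity = static_cast<unsigned>(slots.size() / kEntrySizeSketch);
  unsigned mask = capacity - 1;                   // capacity is 2^n
  for (int i = 0; i < max_probes; i++) {
    unsigned entry = (hash + i + i * i) & mask;   // masked probe index
    unsigned slot = entry * kEntrySizeSketch;     // scale by entry size
    if (slots[slot] == NULL) return -1;           // undefined: not present
    if (slots[slot] == key) return static_cast<int>(slot);
  }
  return -1;
}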
+ __ AssertString(name); + + // Compute the capacity mask. + __ LoadP(scratch1, FieldMemOperand(elements, kCapacityOffset)); + __ SmiUntag(scratch1); // convert smi to int + __ subi(scratch1, scratch1, Operand(1)); + + // Generate an unrolled loop that performs a few probes before + // giving up. Measurements done on Gmail indicate that 2 probes + // cover ~93% of loads from dictionaries. + for (int i = 0; i < kInlinedProbes; i++) { + // Compute the masked index: (hash + i + i * i) & mask. + __ lwz(scratch2, FieldMemOperand(name, String::kHashFieldOffset)); + if (i > 0) { + // Add the probe offset (i + i * i) left shifted to avoid right shifting + // the hash in a separate instruction. The value hash + i + i * i is right + // shifted in the following and instruction. + ASSERT(StringDictionary::GetProbeOffset(i) < + 1 << (32 - String::kHashFieldSlot)); + __ addi(scratch2, scratch2, Operand( + StringDictionary::GetProbeOffset(i) << String::kHashShift)); + } + __ srwi(scratch2, scratch2, Operand(String::kHashShift)); + __ and_(scratch2, scratch1, scratch2); + + // Scale the index by multiplying by the element size. + ASSERT(StringDictionary::kEntrySize == 3); + // scratch2 = scratch2 * 3. + __ ShiftLeftImm(ip, scratch2, Operand(1)); + __ add(scratch2, scratch2, ip); + + // Check if the key is identical to the name. + __ ShiftLeftImm(ip, scratch2, Operand(kPointerSizeLog2)); + __ add(scratch2, elements, ip); + __ LoadP(ip, FieldMemOperand(scratch2, kElementsStartOffset)); + __ cmp(name, ip); + __ beq(done); + } + + const int spill_mask = + (r0.bit() | r9.bit() | r8.bit() | r7.bit() | + r6.bit() | r5.bit() | r4.bit() | r3.bit()) & + ~(scratch1.bit() | scratch2.bit()); + + __ mflr(r0); + __ MultiPush(spill_mask); + if (name.is(r3)) { + ASSERT(!elements.is(r4)); + __ mr(r4, name); + __ mr(r3, elements); + } else { + __ mr(r3, elements); + __ mr(r4, name); + } + StringDictionaryLookupStub stub(POSITIVE_LOOKUP); + __ CallStub(&stub); + __ cmpi(r3, Operand::Zero()); + __ mr(scratch2, r5); + __ MultiPop(spill_mask); + __ mtlr(r0); + + __ bne(done); + __ beq(miss); +} + + +void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { + // This stub overrides SometimesSetsUpAFrame() to return false. That means + // we cannot call anything that could cause a GC from this stub. + // Registers: + // result: StringDictionary to probe + // r4: key + // : StringDictionary to probe. + // index_: will hold an index of entry if lookup is successful. + // might alias with result_. + // Returns: + // result_ is zero if lookup failed, non zero otherwise. + + Register result = r3; + Register dictionary = r3; + Register key = r4; + Register index = r5; + Register mask = r6; + Register hash = r7; + Register undefined = r8; + Register entry_key = r9; + Register scratch = r9; + + Label in_dictionary, maybe_in_dictionary, not_in_dictionary; + + __ LoadP(mask, FieldMemOperand(dictionary, kCapacityOffset)); + __ SmiUntag(mask); + __ subi(mask, mask, Operand(1)); + + __ lwz(hash, FieldMemOperand(key, String::kHashFieldOffset)); + + __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); + + for (int i = kInlinedProbes; i < kTotalProbes; i++) { + // Compute the masked index: (hash + i + i * i) & mask. + // Capacity is smi 2^n. + if (i > 0) { + // Add the probe offset (i + i * i) left shifted to avoid right shifting + // the hash in a separate instruction. The value hash + i + i * i is right + // shifted in the following and instruction. 
+ ASSERT(StringDictionary::GetProbeOffset(i) < + 1 << (32 - String::kHashFieldSlot)); + __ addi(index, hash, Operand( + StringDictionary::GetProbeOffset(i) << String::kHashShift)); + } else { + __ mr(index, hash); + } + __ srwi(r0, index, Operand(String::kHashShift)); + __ and_(index, mask, r0); + + // Scale the index by multiplying by the entry size. + ASSERT(StringDictionary::kEntrySize == 3); + __ ShiftLeftImm(scratch, index, Operand(1)); + __ add(index, index, scratch); // index *= 3. + + ASSERT_EQ(kSmiTagSize, 1); + __ ShiftLeftImm(scratch, index, Operand(kPointerSizeLog2)); + __ add(index, dictionary, scratch); + __ LoadP(entry_key, FieldMemOperand(index, kElementsStartOffset)); + + // Having undefined at this place means the name is not contained. + __ cmp(entry_key, undefined); + __ beq(¬_in_dictionary); + + // Stop if found the property. + __ cmp(entry_key, key); + __ beq(&in_dictionary); + + if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) { + // Check if the entry name is not a symbol. + __ LoadP(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset)); + __ lbz(entry_key, + FieldMemOperand(entry_key, Map::kInstanceTypeOffset)); + __ andi(r0, entry_key, Operand(kIsSymbolMask)); + __ beq(&maybe_in_dictionary, cr0); + } + } + + __ bind(&maybe_in_dictionary); + // If we are doing negative lookup then probing failure should be + // treated as a lookup success. For positive lookup probing failure + // should be treated as lookup failure. + if (mode_ == POSITIVE_LOOKUP) { + __ li(result, Operand::Zero()); + __ Ret(); + } + + __ bind(&in_dictionary); + __ li(result, Operand(1)); + __ Ret(); + + __ bind(¬_in_dictionary); + __ li(result, Operand::Zero()); + __ Ret(); +} + + +struct AheadOfTimeWriteBarrierStubList { + Register object, value, address; + RememberedSetAction action; +}; + +#define REG(Name) { kRegister_ ## Name ## _Code } + +static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { + // Used in RegExpExecStub. + { REG(r28), REG(r26), REG(r10), EMIT_REMEMBERED_SET }, + { REG(r28), REG(r5), REG(r10), EMIT_REMEMBERED_SET }, + // Used in CompileArrayPushCall. + // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore. + // Also used in KeyedStoreIC::GenerateGeneric. + { REG(r6), REG(r7), REG(r8), EMIT_REMEMBERED_SET }, + // Used in CompileStoreGlobal. + { REG(r7), REG(r4), REG(r5), OMIT_REMEMBERED_SET }, + // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField. + { REG(r4), REG(r5), REG(r6), EMIT_REMEMBERED_SET }, + { REG(r6), REG(r5), REG(r4), EMIT_REMEMBERED_SET }, + // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField. + { REG(r5), REG(r4), REG(r6), EMIT_REMEMBERED_SET }, + { REG(r6), REG(r4), REG(r5), EMIT_REMEMBERED_SET }, + // KeyedStoreStubCompiler::GenerateStoreFastElement. 
+ { REG(r6), REG(r5), REG(r7), EMIT_REMEMBERED_SET }, + { REG(r5), REG(r6), REG(r7), EMIT_REMEMBERED_SET }, + // ElementsTransitionGenerator::GenerateMapChangeElementTransition + // and ElementsTransitionGenerator::GenerateSmiToDouble + // and ElementsTransitionGenerator::GenerateDoubleToObject + { REG(r5), REG(r6), REG(r22), EMIT_REMEMBERED_SET }, + { REG(r5), REG(r6), REG(r22), OMIT_REMEMBERED_SET }, + // ElementsTransitionGenerator::GenerateDoubleToObject + { REG(r9), REG(r5), REG(r3), EMIT_REMEMBERED_SET }, + { REG(r5), REG(r9), REG(r22), EMIT_REMEMBERED_SET }, + // StoreArrayLiteralElementStub::Generate + { REG(r8), REG(r3), REG(r9), EMIT_REMEMBERED_SET }, + // FastNewClosureStub::Generate + { REG(r5), REG(r7), REG(r4), EMIT_REMEMBERED_SET }, + // Null termination. + { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET} +}; + +#undef REG + + +bool RecordWriteStub::IsPregenerated() { + for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; + !entry->object.is(no_reg); + entry++) { + if (object_.is(entry->object) && + value_.is(entry->value) && + address_.is(entry->address) && + remembered_set_action_ == entry->action && + save_fp_regs_mode_ == kDontSaveFPRegs) { + return true; + } + } + return false; +} + + +bool StoreBufferOverflowStub::IsPregenerated() { + return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated(); +} + + +void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() { + StoreBufferOverflowStub stub1(kDontSaveFPRegs); + stub1.GetCode()->set_is_pregenerated(true); +} + + +void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() { + for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; + !entry->object.is(no_reg); + entry++) { + RecordWriteStub stub(entry->object, + entry->value, + entry->address, + entry->action, + kDontSaveFPRegs); + stub.GetCode()->set_is_pregenerated(true); + } +} + + +bool CodeStub::CanUseFPRegisters() { + return true; +} + + +// Takes the input in 3 registers: address_ value_ and object_. A pointer to +// the value has just been written into the object, now this stub makes sure +// we keep the GC informed. The word in the object where the value has been +// written is in the address register. +void RecordWriteStub::Generate(MacroAssembler* masm) { + Label skip_to_incremental_noncompacting; + Label skip_to_incremental_compacting; + const int crBit = Assembler::encode_crbit(cr2, CR_LT); + + // The first two branch instructions are generated with labels so as to + // get the offset fixed up correctly by the bind(Label*) call. We patch + // it back and forth between branch condition True and False + // when we start and stop incremental heap marking. + // See RecordWriteStub::Patch for details. + + // Clear the bit, branch on True for NOP action initially + __ crxor(crBit, crBit, crBit); + __ blt(&skip_to_incremental_noncompacting, cr2); + __ blt(&skip_to_incremental_compacting, cr2); + + if (remembered_set_action_ == EMIT_REMEMBERED_SET) { + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + } + __ Ret(); + + __ bind(&skip_to_incremental_noncompacting); + GenerateIncremental(masm, INCREMENTAL); + + __ bind(&skip_to_incremental_compacting); + GenerateIncremental(masm, INCREMENTAL_COMPACTION); + + // Initial mode of the stub is expected to be STORE_BUFFER_ONLY. + // Will be checked in IncrementalMarking::ActivateGeneratedStub. 
+ // patching not required on PPC as the initial path is effectively NOP +} + + +void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { + regs_.Save(masm); + + if (remembered_set_action_ == EMIT_REMEMBERED_SET) { + Label dont_need_remembered_set; + + __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0)); + __ JumpIfNotInNewSpace(regs_.scratch0(), // Value. + regs_.scratch0(), + &dont_need_remembered_set); + + __ CheckPageFlag(regs_.object(), + regs_.scratch0(), + 1 << MemoryChunk::SCAN_ON_SCAVENGE, + ne, + &dont_need_remembered_set); + + // First notify the incremental marker if necessary, then update the + // remembered set. + CheckNeedsToInformIncrementalMarker( + masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode); + InformIncrementalMarker(masm, mode); + regs_.Restore(masm); + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + + __ bind(&dont_need_remembered_set); + } + + CheckNeedsToInformIncrementalMarker( + masm, kReturnOnNoNeedToInformIncrementalMarker, mode); + InformIncrementalMarker(masm, mode); + regs_.Restore(masm); + __ Ret(); +} + + +void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) { + regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_); + int argument_count = 3; + __ PrepareCallCFunction(argument_count, regs_.scratch0()); + Register address = + r3.is(regs_.address()) ? regs_.scratch0() : regs_.address(); + ASSERT(!address.is(regs_.object())); + ASSERT(!address.is(r3)); + __ mr(address, regs_.address()); + __ mr(r3, regs_.object()); + if (mode == INCREMENTAL_COMPACTION) { + __ mr(r4, address); + } else { + ASSERT(mode == INCREMENTAL); + __ LoadP(r4, MemOperand(address, 0)); + } + __ mov(r5, Operand(ExternalReference::isolate_address())); + + AllowExternalCallThatCantCauseGC scope(masm); + if (mode == INCREMENTAL_COMPACTION) { + __ CallCFunction( + ExternalReference::incremental_evacuation_record_write_function( + masm->isolate()), + argument_count); + } else { + ASSERT(mode == INCREMENTAL); + __ CallCFunction( + ExternalReference::incremental_marking_record_write_function( + masm->isolate()), + argument_count); + } + regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); +} + + +void RecordWriteStub::CheckNeedsToInformIncrementalMarker( + MacroAssembler* masm, + OnNoNeedToInformIncrementalMarker on_no_need, + Mode mode) { + Label on_black; + Label need_incremental; + Label need_incremental_pop_scratch; + + ASSERT((~Page::kPageAlignmentMask & 0xffff) == 0); + __ lis(r0, Operand((~Page::kPageAlignmentMask >> 16))); + __ and_(regs_.scratch0(), regs_.object(), r0); + __ LoadP(regs_.scratch1(), + MemOperand(regs_.scratch0(), + MemoryChunk::kWriteBarrierCounterOffset)); + __ subi(regs_.scratch1(), regs_.scratch1(), Operand(1)); + __ StoreP(regs_.scratch1(), + MemOperand(regs_.scratch0(), + MemoryChunk::kWriteBarrierCounterOffset)); + __ cmpi(regs_.scratch1(), Operand::Zero()); // PPC, we could do better here + __ blt(&need_incremental); + + // Let's look at the color of the object: If it is not black we don't have + // to inform the incremental marker. + __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black); + + regs_.Restore(masm); + if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + } else { + __ Ret(); + } + + __ bind(&on_black); + + // Get the value from the slot. 
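// This helper encodes the incremental marking invariant the write barrier
// protects: only a black (fully scanned) object acquiring a pointer to a
// white (unmarked) value needs the marker's attention; everything else can
// simply update the remembered set or return. A schematic host-side version
// with hypothetical types (the real stub additionally consults per-page
// flags and the page's write barrier counter):
enum MarkColor { MARK_WHITE, MARK_GREY, MARK_BLACK };

struct MarkInfo { MarkColor color; };

static bool NeedsToInformIncrementalMarker(const MarkInfo& object,
                                           const MarkInfo& value) {
  if (object.color != MARK_BLACK) return false;  // not yet scanned: safe
  return value.color == MARK_WHITE;              // black -> white edge
}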
+ __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0)); + + if (mode == INCREMENTAL_COMPACTION) { + Label ensure_not_white; + + __ CheckPageFlag(regs_.scratch0(), // Contains value. + regs_.scratch1(), // Scratch. + MemoryChunk::kEvacuationCandidateMask, + eq, + &ensure_not_white); + + __ CheckPageFlag(regs_.object(), + regs_.scratch1(), // Scratch. + MemoryChunk::kSkipEvacuationSlotsRecordingMask, + eq, + &need_incremental); + + __ bind(&ensure_not_white); + } + + // We need extra registers for this, so we push the object and the address + // register temporarily. + __ Push(regs_.object(), regs_.address()); + __ EnsureNotWhite(regs_.scratch0(), // The value. + regs_.scratch1(), // Scratch. + regs_.object(), // Scratch. + regs_.address(), // Scratch. + &need_incremental_pop_scratch); + __ Pop(regs_.object(), regs_.address()); + + regs_.Restore(masm); + if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + } else { + __ Ret(); + } + + __ bind(&need_incremental_pop_scratch); + __ Pop(regs_.object(), regs_.address()); + + __ bind(&need_incremental); + + // Fall through when we need to inform the incremental marker. +} + + +void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- r3 : element value to store + // -- r4 : array literal + // -- r5 : map of array literal + // -- r6 : element index as smi + // -- r7 : array literal index in function as smi + // ----------------------------------- + + Label element_done; + Label double_elements; + Label smi_element; + Label slow_elements; + Label fast_elements; + + __ CheckFastElements(r5, r8, &double_elements); + // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS + __ JumpIfSmi(r3, &smi_element); + __ CheckFastSmiElements(r5, r8, &fast_elements); + + // Store into the array literal requires a elements transition. Call into + // the runtime. + __ bind(&slow_elements); + // call. + __ Push(r4, r6, r3); + __ LoadP(r8, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); + __ LoadP(r8, FieldMemOperand(r8, JSFunction::kLiteralsOffset)); + __ Push(r8, r7); + __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1); + + // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object. + __ bind(&fast_elements); + __ LoadP(r8, FieldMemOperand(r4, JSObject::kElementsOffset)); + __ SmiToPtrArrayOffset(r9, r6); + __ add(r9, r8, r9); +#if V8_TARGET_ARCH_PPC64 + // add due to offset alignment requirements of StorePU + __ addi(r9, r9, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ StoreP(r3, MemOperand(r9)); +#else + __ StorePU(r3, MemOperand(r9, FixedArray::kHeaderSize - kHeapObjectTag)); +#endif + // Update the write barrier for the array store. + __ RecordWrite(r8, r9, r3, kLRHasNotBeenSaved, kDontSaveFPRegs, + EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + __ Ret(); + + // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS, + // and value is Smi. + __ bind(&smi_element); + __ LoadP(r8, FieldMemOperand(r4, JSObject::kElementsOffset)); + __ SmiToPtrArrayOffset(r9, r6); + __ add(r9, r8, r9); + __ StoreP(r3, FieldMemOperand(r9, FixedArray::kHeaderSize), r0); + __ Ret(); + + // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS. + __ bind(&double_elements); + __ LoadP(r8, FieldMemOperand(r4, JSObject::kElementsOffset)); + __ StoreNumberToDoubleElements(r3, r6, r4, + // Overwrites all regs after this. 
+ r8, r9, r10, r22, r5, + &slow_elements); + __ Ret(); +} + + +void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { + if (entry_hook_ != NULL) { + ProfileEntryHookStub stub; + __ mflr(r0); + __ push(r0); + __ CallStub(&stub); + __ pop(r0); + __ mtlr(r0); + } +} + + +void ProfileEntryHookStub::Generate(MacroAssembler* masm) { + // The entry hook is a "push lr" instruction, followed by a call. + const int32_t kReturnAddressDistanceFromFunctionStart = + Assembler::kCallTargetAddressOffset + 2 * Assembler::kInstrSize; + + // Save live volatile registers. + __ mflr(r3); + __ Push(r3, r30, r4); + const int32_t kNumSavedRegs = 3; + + // Compute the function's address for the first argument. + __ subi(r3, r3, Operand(kReturnAddressDistanceFromFunctionStart)); + + // The caller's return address is above the saved temporaries. + // Grab that for the second argument to the hook. + __ addi(r4, sp, Operand(kNumSavedRegs * kPointerSize)); + + // Align the stack if necessary. + int frame_alignment = masm->ActivationFrameAlignment(); + if (frame_alignment > kPointerSize) { + __ mr(r30, sp); + ASSERT(IsPowerOf2(frame_alignment)); + ASSERT(-frame_alignment == -8); + __ ClearRightImm(sp, sp, Operand(3)); + } + +#if !defined(USE_SIMULATOR) + __ mov(ip, Operand(reinterpret_cast(&entry_hook_))); + __ LoadP(ip, MemOperand(ip)); + +#if ABI_USES_FUNCTION_DESCRIPTORS + // Function descriptor + __ LoadP(ToRegister(2), MemOperand(ip, kPointerSize)); + __ LoadP(ip, MemOperand(ip, 0)); +#elif ABI_TOC_ADDRESSABILITY_VIA_IP + // ip already set. +#endif + + // PPC LINUX ABI: + __ addi(sp, sp, Operand(-kNumRequiredStackFrameSlots * kPointerSize)); +#else + // Under the simulator we need to indirect the entry hook through a + // trampoline function at a known address. + Address trampoline_address = reinterpret_cast
( + reinterpret_cast(EntryHookTrampoline)); + ApiFunction dispatcher(trampoline_address); + __ mov(ip, Operand(ExternalReference(&dispatcher, + ExternalReference::BUILTIN_CALL, + masm->isolate()))); +#endif + __ Call(ip); + +// For the most part this is true only when USE_SIMULATOR is true +// The exception is when built with nativesim=true, then we need +// Real PPC calling support plus simulation +#if defined(V8_HOST_ARCH_PPC64) || defined(V8_HOST_ARCH_PPC) + __ addi(sp, sp, Operand(kNumRequiredStackFrameSlots * kPointerSize)); +#endif + + // Restore the stack pointer if needed. + if (frame_alignment > kPointerSize) { + __ mr(sp, r30); + } + + __ Pop(r0, r30, r4); + __ mtlr(r0); + __ Ret(); +} + +#undef __ +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_PPC diff --git a/src/ppc/code-stubs-ppc.h b/src/ppc/code-stubs-ppc.h new file mode 100644 index 0000000..0b24a73 --- /dev/null +++ b/src/ppc/code-stubs-ppc.h @@ -0,0 +1,884 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// +// Copyright IBM Corp. 2012, 2013. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_PPC_CODE_STUBS_PPC_H_ +#define V8_PPC_CODE_STUBS_PPC_H_ + +#include "ic-inl.h" + +namespace v8 { +namespace internal { + +// Compute a transcendental math function natively, or call the +// TranscendentalCache runtime function. 
+class TranscendentalCacheStub: public CodeStub { + public: + enum ArgumentType { + TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits, + UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits + }; + + TranscendentalCacheStub(TranscendentalCache::Type type, + ArgumentType argument_type) + : type_(type), argument_type_(argument_type) { } + void Generate(MacroAssembler* masm); + private: + TranscendentalCache::Type type_; + ArgumentType argument_type_; + void GenerateCallCFunction(MacroAssembler* masm, Register scratch); + + Major MajorKey() { return TranscendentalCache; } + int MinorKey() { return type_ | argument_type_; } + Runtime::FunctionId RuntimeFunction(); +}; + + +class StoreBufferOverflowStub: public CodeStub { + public: + explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp) + : save_doubles_(save_fp) { } + + void Generate(MacroAssembler* masm); + + virtual bool IsPregenerated(); + static void GenerateFixedRegStubsAheadOfTime(); + virtual bool SometimesSetsUpAFrame() { return false; } + + private: + SaveFPRegsMode save_doubles_; + + Major MajorKey() { return StoreBufferOverflow; } + int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; } +}; + + +class UnaryOpStub: public CodeStub { + public: + UnaryOpStub(Token::Value op, + UnaryOverwriteMode mode, + UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED) + : op_(op), + mode_(mode), + operand_type_(operand_type) { + } + + private: + Token::Value op_; + UnaryOverwriteMode mode_; + + // Operand type information determined at runtime. + UnaryOpIC::TypeInfo operand_type_; + + virtual void PrintName(StringStream* stream); + + class ModeBits: public BitField {}; + class OpBits: public BitField {}; + class OperandTypeInfoBits: public BitField {}; + + Major MajorKey() { return UnaryOp; } + int MinorKey() { + return ModeBits::encode(mode_) + | OpBits::encode(op_) + | OperandTypeInfoBits::encode(operand_type_); + } + + // Note: A lot of the helper functions below will vanish when we use virtual + // function instead of switch more often. 
+ void Generate(MacroAssembler* masm); + + void GenerateTypeTransition(MacroAssembler* masm); + + void GenerateSmiStub(MacroAssembler* masm); + void GenerateSmiStubSub(MacroAssembler* masm); + void GenerateSmiStubBitNot(MacroAssembler* masm); + void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow); + void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow); + + void GenerateHeapNumberStub(MacroAssembler* masm); + void GenerateHeapNumberStubSub(MacroAssembler* masm); + void GenerateHeapNumberStubBitNot(MacroAssembler* masm); + void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow); + void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow); + + void GenerateGenericStub(MacroAssembler* masm); + void GenerateGenericStubSub(MacroAssembler* masm); + void GenerateGenericStubBitNot(MacroAssembler* masm); + void GenerateGenericCodeFallback(MacroAssembler* masm); + + virtual int GetCodeKind() { return Code::UNARY_OP_IC; } + + virtual InlineCacheState GetICState() { + return UnaryOpIC::ToState(operand_type_); + } + + virtual void FinishCode(Handle code) { + code->set_unary_op_type(operand_type_); + } +}; + + +class BinaryOpStub: public CodeStub { + public: + BinaryOpStub(Token::Value op, OverwriteMode mode) + : op_(op), + mode_(mode), + operands_type_(BinaryOpIC::UNINITIALIZED), + result_type_(BinaryOpIC::UNINITIALIZED) { + ASSERT(OpBits::is_valid(Token::NUM_TOKENS)); + } + + BinaryOpStub( + int key, + BinaryOpIC::TypeInfo operands_type, + BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED) + : op_(OpBits::decode(key)), + mode_(ModeBits::decode(key)), + operands_type_(operands_type), + result_type_(result_type) { } + + private: + enum SmiCodeGenerateHeapNumberResults { + ALLOW_HEAPNUMBER_RESULTS, + NO_HEAPNUMBER_RESULTS + }; + + Token::Value op_; + OverwriteMode mode_; + + // Operand type information determined at runtime. + BinaryOpIC::TypeInfo operands_type_; + BinaryOpIC::TypeInfo result_type_; + + virtual void PrintName(StringStream* stream); + + // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM. 
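// The minor key is just these BitField helpers OR-ed together: each field
// owns a contiguous run of bits identified by a shift and a width, as the
// RRRTTTVOOOOOOOMM comment above lays out. A minimal sketch of such a helper
// (shifts and widths here are placeholders, not the exact ones these stubs
// use):
template <class T, int kShift, int kSize>
struct BitFieldSketch {
  static int encode(T value) {
    return (static_cast<int>(value) & ((1 << kSize) - 1)) << kShift;
  }
  static T decode(int key) {
    return static_cast<T>((key >> kShift) & ((1 << kSize) - 1));
  }
};
// MinorKey() then returns OpBits::encode(op_) | ModeBits::encode(mode_) | ...
// and the key-based constructor above recovers each field with the matching
// decode() call.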
+ class ModeBits: public BitField {}; + class OpBits: public BitField {}; + class OperandTypeInfoBits: public BitField {}; + class ResultTypeInfoBits: public BitField {}; + + Major MajorKey() { return BinaryOp; } + int MinorKey() { + return OpBits::encode(op_) + | ModeBits::encode(mode_) + | OperandTypeInfoBits::encode(operands_type_) + | ResultTypeInfoBits::encode(result_type_); + } + + void Generate(MacroAssembler* masm); + void GenerateGeneric(MacroAssembler* masm); + void GenerateSmiSmiOperation(MacroAssembler* masm); + void GenerateFPOperation(MacroAssembler* masm, + bool smi_operands, + Label* not_numbers, + Label* gc_required); + void GenerateSmiCode(MacroAssembler* masm, + Label* use_runtime, + Label* gc_required, + SmiCodeGenerateHeapNumberResults heapnumber_results); + void GenerateLoadArguments(MacroAssembler* masm); + void GenerateReturn(MacroAssembler* masm); + void GenerateUninitializedStub(MacroAssembler* masm); + void GenerateSmiStub(MacroAssembler* masm); + void GenerateInt32Stub(MacroAssembler* masm); + void GenerateHeapNumberStub(MacroAssembler* masm); + void GenerateOddballStub(MacroAssembler* masm); + void GenerateStringStub(MacroAssembler* masm); + void GenerateBothStringStub(MacroAssembler* masm); + void GenerateGenericStub(MacroAssembler* masm); + void GenerateAddStrings(MacroAssembler* masm); + void GenerateCallRuntime(MacroAssembler* masm); + + void GenerateHeapResultAllocation(MacroAssembler* masm, + Register result, + Register heap_number_map, + Register scratch1, + Register scratch2, + Label* gc_required); + void GenerateRegisterArgsPush(MacroAssembler* masm); + void GenerateTypeTransition(MacroAssembler* masm); + void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm); + + virtual int GetCodeKind() { return Code::BINARY_OP_IC; } + + virtual InlineCacheState GetICState() { + return BinaryOpIC::ToState(operands_type_); + } + + virtual void FinishCode(Handle code) { + code->set_binary_op_type(operands_type_); + code->set_binary_op_result_type(result_type_); + } + + friend class CodeGenerator; +}; + + +class StringHelper : public AllStatic { + public: + // Generate code for copying characters using a simple loop. This should only + // be used in places where the number of characters is small and the + // additional setup and checking in GenerateCopyCharactersLong adds too much + // overhead. Copying of overlapping regions is not supported. + // Dest register ends at the position after the last character written. + static void GenerateCopyCharacters(MacroAssembler* masm, + Register dest, + Register src, + Register count, + Register scratch, + bool ascii); + + // Generate code for copying a large number of characters. This function + // is allowed to spend extra time setting up conditions to make copying + // faster. Copying of overlapping regions is not supported. + // Dest register ends at the position after the last character written. + static void GenerateCopyCharactersLong(MacroAssembler* masm, + Register dest, + Register src, + Register count, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Register scratch5, + int flags); + + + // Probe the symbol table for a two character string. If the string is + // not found by probing a jump to the label not_found is performed. This jump + // does not guarantee that the string is not in the symbol table. If the + // string is found the code falls through with the string in register r0. + // Contents of both c1 and c2 registers are modified. 
At the exit c1 is + // guaranteed to contain halfword with low and high bytes equal to + // initial contents of c1 and c2 respectively. + static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, + Register c1, + Register c2, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Register scratch5, + Label* not_found); + + // Generate string hash. + static void GenerateHashInit(MacroAssembler* masm, + Register hash, + Register character, + Register scratch); + + static void GenerateHashAddCharacter(MacroAssembler* masm, + Register hash, + Register character, + Register scratch); + + static void GenerateHashGetHash(MacroAssembler* masm, + Register hash, + Register scratch); + + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper); +}; + + +// Flag that indicates how to generate code for the stub StringAddStub. +enum StringAddFlags { + NO_STRING_ADD_FLAGS = 0, + // Omit left string check in stub (left is definitely a string). + NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0, + // Omit right string check in stub (right is definitely a string). + NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1, + // Omit both string checks in stub. + NO_STRING_CHECK_IN_STUB = + NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB +}; + + +class StringAddStub: public CodeStub { + public: + explicit StringAddStub(StringAddFlags flags) : flags_(flags) {} + + private: + Major MajorKey() { return StringAdd; } + int MinorKey() { return flags_; } + + void Generate(MacroAssembler* masm); + + void GenerateConvertArgument(MacroAssembler* masm, + int stack_offset, + Register arg, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Label* slow); + + const StringAddFlags flags_; +}; + + +class SubStringStub: public CodeStub { + public: + SubStringStub() {} + + private: + Major MajorKey() { return SubString; } + int MinorKey() { return 0; } + + void Generate(MacroAssembler* masm); +}; + + + +class StringCompareStub: public CodeStub { + public: + StringCompareStub() { } + + // Compares two flat ASCII strings and returns result in r0. + static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm, + Register left, + Register right, + Register scratch1, + Register scratch2, + Register scratch3); + + // Compares two flat ASCII strings for equality and returns result + // in r0. + static void GenerateFlatAsciiStringEquals(MacroAssembler* masm, + Register left, + Register right, + Register scratch1, + Register scratch2); + + private: + virtual Major MajorKey() { return StringCompare; } + virtual int MinorKey() { return 0; } + virtual void Generate(MacroAssembler* masm); + + static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm, + Register left, + Register right, + Register length, + Register scratch1, + Label* chars_not_equal); +}; + + +class NumberToStringStub: public CodeStub { + public: + NumberToStringStub() { } + + // Generate code to do a lookup in the number string cache. If the number in + // the register object is found in the cache the generated code falls through + // with the result in the result register. The object and the result register + // can be the same. If the number is not found in the cache the code jumps to + // the label not_found with only the content of register object unchanged. 
+ static void GenerateLookupNumberStringCache(MacroAssembler* masm, + Register object, + Register result, + Register scratch1, + Register scratch2, + Register scratch3, + bool object_is_smi, + Label* not_found); + + private: + Major MajorKey() { return NumberToString; } + int MinorKey() { return 0; } + + void Generate(MacroAssembler* masm); +}; + + +class RecordWriteStub: public CodeStub { + public: + RecordWriteStub(Register object, + Register value, + Register address, + RememberedSetAction remembered_set_action, + SaveFPRegsMode fp_mode) + : object_(object), + value_(value), + address_(address), + remembered_set_action_(remembered_set_action), + save_fp_regs_mode_(fp_mode), + regs_(object, // An input reg. + address, // An input reg. + value) { // One scratch reg. + } + + enum Mode { + STORE_BUFFER_ONLY, + INCREMENTAL, + INCREMENTAL_COMPACTION + }; + + virtual bool IsPregenerated(); + static void GenerateFixedRegStubsAheadOfTime(); + virtual bool SometimesSetsUpAFrame() { return false; } + + static void PatchBranchIntoNop(MacroAssembler* masm, int pos) { + masm->instr_at_put(pos, (masm->instr_at(pos) & ~kBOfieldMask) | BT); + // roohack ASSERT(Assembler::IsTstImmediate(masm->instr_at(pos))); + } + + static void PatchNopIntoBranch(MacroAssembler* masm, int pos) { + masm->instr_at_put(pos, (masm->instr_at(pos) & ~kBOfieldMask) | BF); + // roohack ASSERT(Assembler::IsBranch(masm->instr_at(pos))); + } + + static Mode GetMode(Code* stub) { + Instr first_instruction = Assembler::instr_at(stub->instruction_start() + + Assembler::kInstrSize); + Instr second_instruction = Assembler::instr_at(stub->instruction_start() + + (Assembler::kInstrSize*2)); + + if (BF == (first_instruction & kBOfieldMask)) { + return INCREMENTAL; + } + + // roohack ASSERT(Assembler::IsTstImmediate(first_instruction)); + + if (BF == (second_instruction & kBOfieldMask)) { + return INCREMENTAL_COMPACTION; + } + + // roohack ASSERT(Assembler::IsTstImmediate(second_instruction)); + + return STORE_BUFFER_ONLY; + } + + static void Patch(Code* stub, Mode mode) { + MacroAssembler masm(NULL, + stub->instruction_start(), + stub->instruction_size()); + switch (mode) { + case STORE_BUFFER_ONLY: + ASSERT(GetMode(stub) == INCREMENTAL || + GetMode(stub) == INCREMENTAL_COMPACTION); + + PatchBranchIntoNop(&masm, Assembler::kInstrSize); + PatchBranchIntoNop(&masm, Assembler::kInstrSize*2); + break; + case INCREMENTAL: + ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); + PatchNopIntoBranch(&masm, Assembler::kInstrSize); + break; + case INCREMENTAL_COMPACTION: + ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); + PatchNopIntoBranch(&masm, Assembler::kInstrSize*2); + break; + } + ASSERT(GetMode(stub) == mode); + CPU::FlushICache(stub->instruction_start()+Assembler::kInstrSize, + 2 * Assembler::kInstrSize); + } + + private: + // This is a helper class for freeing up 3 scratch registers. The input is + // two registers that must be preserved and one scratch register provided by + // the caller. + class RegisterAllocation { + public: + RegisterAllocation(Register object, + Register address, + Register scratch0) + : object_(object), + address_(address), + scratch0_(scratch0) { + ASSERT(!AreAliased(scratch0, object, address, no_reg)); + scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_); + } + + void Save(MacroAssembler* masm) { + ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_)); + // We don't have to save scratch0_ because it was given to us as + // a scratch register. 
+ masm->push(scratch1_); + } + + void Restore(MacroAssembler* masm) { + masm->pop(scratch1_); + } + + // If we have to call into C then we need to save and restore all caller- + // saved registers that were not already preserved. The scratch registers + // will be restored by other means so we don't bother pushing them here. + void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) { + masm->mflr(r0); + masm->push(r0); + masm->MultiPush(kJSCallerSaved & ~scratch1_.bit()); + if (mode == kSaveFPRegs) { + // Save all volatile VFP registers except d0. + const int kNumRegs = DwVfpRegister::kNumVolatileRegisters - 1; + masm->subi(sp, sp, Operand(kDoubleSize * kNumRegs)); + for (int i = kNumRegs; i > 0; i--) { + DwVfpRegister reg = DwVfpRegister::from_code(i); + masm->stfd(reg, MemOperand(sp, (i - 1) * kDoubleSize)); + } + } + } + + inline void RestoreCallerSaveRegisters(MacroAssembler*masm, + SaveFPRegsMode mode) { + if (mode == kSaveFPRegs) { + // Restore all VFP registers except d0. + const int kNumRegs = DwVfpRegister::kNumVolatileRegisters - 1; + for (int i = kNumRegs; i > 0; i--) { + DwVfpRegister reg = DwVfpRegister::from_code(i); + masm->lfd(reg, MemOperand(sp, (i - 1) * kDoubleSize)); + } + masm->addi(sp, sp, Operand(kDoubleSize * kNumRegs)); + } + masm->MultiPop(kJSCallerSaved & ~scratch1_.bit()); + masm->pop(r0); + masm->mtlr(r0); + } + + inline Register object() { return object_; } + inline Register address() { return address_; } + inline Register scratch0() { return scratch0_; } + inline Register scratch1() { return scratch1_; } + + private: + Register object_; + Register address_; + Register scratch0_; + Register scratch1_; + + Register GetRegThatIsNotOneOf(Register r1, + Register r2, + Register r3) { + for (int i = 0; i < Register::kNumAllocatableRegisters; i++) { + Register candidate = Register::FromAllocationIndex(i); + if (candidate.is(r1)) continue; + if (candidate.is(r2)) continue; + if (candidate.is(r3)) continue; + return candidate; + } + UNREACHABLE(); + return no_reg; + } + friend class RecordWriteStub; + }; + + enum OnNoNeedToInformIncrementalMarker { + kReturnOnNoNeedToInformIncrementalMarker, + kUpdateRememberedSetOnNoNeedToInformIncrementalMarker + }; + + void Generate(MacroAssembler* masm); + void GenerateIncremental(MacroAssembler* masm, Mode mode); + void CheckNeedsToInformIncrementalMarker( + MacroAssembler* masm, + OnNoNeedToInformIncrementalMarker on_no_need, + Mode mode); + void InformIncrementalMarker(MacroAssembler* masm, Mode mode); + + Major MajorKey() { return RecordWrite; } + + int MinorKey() { + return ObjectBits::encode(object_.code()) | + ValueBits::encode(value_.code()) | + AddressBits::encode(address_.code()) | + RememberedSetActionBits::encode(remembered_set_action_) | + SaveFPRegsModeBits::encode(save_fp_regs_mode_); + } + + void Activate(Code* code) { + code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code); + } + + class ObjectBits: public BitField {}; + class ValueBits: public BitField {}; + class AddressBits: public BitField {}; + class RememberedSetActionBits: public BitField {}; + class SaveFPRegsModeBits: public BitField {}; + + Register object_; + Register value_; + Register address_; + RememberedSetAction remembered_set_action_; + SaveFPRegsMode save_fp_regs_mode_; + Label slow_; + RegisterAllocation regs_; +}; + + +// Enter C code from generated RegExp code in a way that allows +// the C code to fix the return address in case of a GC. 
+class RegExpCEntryStub: public CodeStub { + public: + RegExpCEntryStub() {} + virtual ~RegExpCEntryStub() {} + void Generate(MacroAssembler* masm); + + private: + Major MajorKey() { return RegExpCEntry; } + int MinorKey() { return 0; } + + bool NeedsImmovableCode() { return true; } +}; + + +// Trampoline stub to call into native code. To call safely into native code +// in the presence of compacting GC (which can move code objects) we need to +// keep the code which called into native pinned in the memory. Currently the +// simplest approach is to generate such stub early enough so it can never be +// moved by GC +class DirectCEntryStub: public CodeStub { + public: + DirectCEntryStub() {} + void Generate(MacroAssembler* masm); + void GenerateCall(MacroAssembler* masm, ExternalReference function); + void GenerateCall(MacroAssembler* masm, Register target); + + private: + Major MajorKey() { return DirectCEntry; } + int MinorKey() { return 0; } + + bool NeedsImmovableCode() { return true; } +}; + + +class FloatingPointHelper : public AllStatic { + public: + enum Destination { + kFPRegisters, + kCoreRegisters + }; + + + // Loads smis from r0 and r1 (right and left in binary operations) into + // floating point registers. Depending on the destination the values ends up + // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is + // floating point registers VFP3 must be supported. If core registers are + // requested when VFP3 is supported d6 and d7 will be scratched. + static void LoadSmis(MacroAssembler* masm, + Register scratch1, + Register scratch2); + + // Loads objects from r0 and r1 (right and left in binary operations) into + // floating point registers. Depending on the destination the values ends up + // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is + // floating point registers VFP3 must be supported. If core registers are + // requested when VFP3 is supported d6 and d7 will still be scratched. If + // either r0 or r1 is not a number (not smi and not heap number object) the + // not_number label is jumped to with r0 and r1 intact. + static void LoadOperands(MacroAssembler* masm, + Register heap_number_map, + Register scratch1, + Register scratch2, + Label* not_number); + + // Convert the smi or heap number in object to an int32 using the rules + // for ToInt32 as described in ECMAScript 9.5.: the value is truncated + // and brought into the range -2^31 .. +2^31 - 1. + static void ConvertNumberToInt32(MacroAssembler* masm, + Register object, + Register dst, + Register heap_number_map, + Register scratch1, + Register scratch2, + Register scratch3, + DwVfpRegister double_scratch, + Label* not_int32); + + // Converts the integer (untagged smi) in |src| to a double, storing + // the result to |double_dst| + static void ConvertIntToDouble(MacroAssembler* masm, + Register src, + DwVfpRegister double_dst); + + // Converts the unsigned integer (untagged smi) in |src| to + // a double, storing the result to |double_dst| + static void ConvertUnsignedIntToDouble(MacroAssembler* masm, + Register src, + DwVfpRegister double_dst); + + // Converts the integer (untagged smi) in |src| to + // a float, storing the result in |dst| + // Warning: The value in |int_scrach| will be changed in the process! + static void ConvertIntToFloat(MacroAssembler* masm, + const DwVfpRegister dst, + const Register src, + const Register int_scratch); + + /* + // Converts the double in |double_value| to an integer, storing the + // result in |int_dst|. 
+ // Warning: The value in |double_value| will be changed in the process! + static void ConvertDoubleToInt(MacroAssembler* masm, + DwVfpRegister double_value, + Register int_dst, + Register scratch1, + DwVfpRegister double_scratch); + + // Converts the double in |double_value| to an unsigned integer, + // storing the result in |int_dst|. + // Warning: The value in |double_value| will be changed in the process! + static void ConvertDoubleToUnsignedInt(MacroAssembler* masm, + DwVfpRegister double_value, + Register int_dst, + Register scratch1, + DwVfpRegister double_scratch); + */ + + // Load the number from object into double_dst in the double format. + // Control will jump to not_int32 if the value cannot be exactly represented + // by a 32-bit integer. + // Floating point value in the 32-bit integer range that are not exact integer + // won't be loaded. + static void LoadNumberAsInt32Double(MacroAssembler* masm, + Register object, + DwVfpRegister double_dst, + DwVfpRegister double_scratch, + Register heap_number_map, + Register scratch1, + Register scratch2, + Label* not_int32); + + // Loads the number from object into dst as a 32-bit integer. + // Control will jump to not_int32 if the object cannot be exactly represented + // by a 32-bit integer. + // Floating point value in the 32-bit integer range that are not exact integer + // won't be converted. + // scratch3 is not used when VFP3 is supported. + static void LoadNumberAsInt32(MacroAssembler* masm, + Register object, + Register dst, + Register heap_number_map, + Register scratch1, + Register scratch2, + Register scratch3, + DwVfpRegister double_scratch0, + DwVfpRegister double_scratch1, + Label* not_int32); + + // Generate non VFP3 code to check if a double can be exactly represented by a + // 32-bit integer. This does not check for 0 or -0, which need + // to be checked for separately. + // Control jumps to not_int32 if the value is not a 32-bit integer, and falls + // through otherwise. + // src1 and src2 will be cloberred. + // + // Expected input: + // - src1: higher (exponent) part of the double value. + // - src2: lower (mantissa) part of the double value. + // Output status: + // - dst: 32 higher bits of the mantissa. (mantissa[51:20]) + // - src2: contains 1. + // - other registers are clobbered. + static void DoubleIs32BitInteger(MacroAssembler* masm, + Register src1, + Register src2, + Register dst, + Register scratch, + Label* not_int32); + + // Generates code to call a C function to do a double operation using core + // registers. (Used when VFP3 is not supported.) + // This code never falls through, but returns with a heap number containing + // the result in r0. + // Register heapnumber_result must be a heap number in which the + // result of the operation will be stored. + // Requires the following layout on entry: + // r0: Left value (least significant part of mantissa). + // r1: Left value (sign, exponent, top of mantissa). + // r2: Right value (least significant part of mantissa). + // r3: Right value (sign, exponent, top of mantissa). 
+ static void CallCCodeForDoubleOperation(MacroAssembler* masm, + Token::Value op, + Register heap_number_result, + Register scratch); + + private: + static void LoadNumber(MacroAssembler* masm, + Register object, + DwVfpRegister dst, + Register heap_number_map, + Register scratch1, + Register scratch2, + Label* not_number); +}; + + +class StringDictionaryLookupStub: public CodeStub { + public: + enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP }; + + explicit StringDictionaryLookupStub(LookupMode mode) : mode_(mode) { } + + void Generate(MacroAssembler* masm); + + static void GenerateNegativeLookup(MacroAssembler* masm, + Label* miss, + Label* done, + Register receiver, + Register properties, + Handle<String> name, + Register scratch0); + + static void GeneratePositiveLookup(MacroAssembler* masm, + Label* miss, + Label* done, + Register elements, + Register name, + Register r0, + Register r1); + + virtual bool SometimesSetsUpAFrame() { return false; } + + private: + static const int kInlinedProbes = 4; + static const int kTotalProbes = 20; + + static const int kCapacityOffset = + StringDictionary::kHeaderSize + + StringDictionary::kCapacityIndex * kPointerSize; + + static const int kElementsStartOffset = + StringDictionary::kHeaderSize + + StringDictionary::kElementsStartIndex * kPointerSize; + + Major MajorKey() { return StringDictionaryLookup; } + + int MinorKey() { + return LookupModeBits::encode(mode_); + } + + class LookupModeBits: public BitField<LookupMode, 0, 1> {}; + + LookupMode mode_; +}; + +} } // namespace v8::internal + +#endif // V8_PPC_CODE_STUBS_PPC_H_ diff --git a/src/ppc/codegen-ppc.cc b/src/ppc/codegen-ppc.cc new file mode 100644 index 0000000..47092df --- /dev/null +++ b/src/ppc/codegen-ppc.cc @@ -0,0 +1,492 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// +// Copyright IBM Corp. 2012, 2013. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#include "v8.h" + +#if defined(V8_TARGET_ARCH_PPC) + +#include "codegen.h" +#include "macro-assembler.h" + +namespace v8 { +namespace internal { + +#define __ ACCESS_MASM(masm) + +UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) { + switch (type) { + case TranscendentalCache::SIN: return &sin; + case TranscendentalCache::COS: return &cos; + case TranscendentalCache::TAN: return &tan; + case TranscendentalCache::LOG: return &log; + default: UNIMPLEMENTED(); + } + return NULL; +} + + +UnaryMathFunction CreateSqrtFunction() { + return &sqrt; +} + +// ------------------------------------------------------------------------- +// Platform-specific RuntimeCallHelper functions. + +void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { + masm->EnterFrame(StackFrame::INTERNAL); + ASSERT(!masm->has_frame()); + masm->set_has_frame(true); +} + + +void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { + masm->LeaveFrame(StackFrame::INTERNAL); + ASSERT(masm->has_frame()); + masm->set_has_frame(false); +} + + +// ------------------------------------------------------------------------- +// Code generators + +void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( + MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- r3 : value + // -- r4 : key + // -- r5 : receiver + // -- lr : return address + // -- r6 : target map, scratch for subsequent call + // -- r7 : scratch (elements) + // ----------------------------------- + // Set transitioned map. + __ StoreP(r6, FieldMemOperand(r5, HeapObject::kMapOffset), r0); + __ RecordWriteField(r5, + HeapObject::kMapOffset, + r6, + r22, + kLRHasNotBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); +} + + +void ElementsTransitionGenerator::GenerateSmiToDouble( + MacroAssembler* masm, Label* fail) { + // ----------- S t a t e ------------- + // -- r3 : value + // -- r4 : key + // -- r5 : receiver + // -- lr : return address + // -- r6 : target map, scratch for subsequent call + // -- r7 : scratch (elements) + // ----------------------------------- + Label loop, entry, convert_hole, gc_required, only_change_map, done; + + // Check for empty arrays, which only require a map transition and no changes + // to the backing store. + __ LoadP(r7, FieldMemOperand(r5, JSObject::kElementsOffset)); + __ CompareRoot(r7, Heap::kEmptyFixedArrayRootIndex); + __ beq(&only_change_map); + + // Preserve lr and use r30 as a temporary register. + __ mflr(r0); + __ Push(r0, r30); + + __ LoadP(r8, FieldMemOperand(r7, FixedArray::kLengthOffset)); + // r7: source FixedArray + // r8: number of elements (smi-tagged) + + // Allocate new FixedDoubleArray. + __ SmiToDoubleArrayOffset(r30, r8); + __ addi(r30, r30, Operand(FixedDoubleArray::kHeaderSize + kPointerSize)); + __ AllocateInNewSpace(r30, r9, r10, r22, &gc_required, NO_ALLOCATION_FLAGS); + // r9: destination FixedDoubleArray, not tagged as heap object. + + // Align the array conveniently for doubles. + // Store a filler value in the unused memory. + Label aligned, aligned_done; + __ andi(r0, r9, Operand(kDoubleAlignmentMask)); + __ mov(ip, Operand(masm->isolate()->factory()->one_pointer_filler_map())); + __ beq(&aligned, cr0); + // Store at the beginning of the allocated memory and update the base pointer. + __ StoreP(ip, MemOperand(r9)); + __ addi(r9, r9, Operand(kPointerSize)); + __ b(&aligned_done); + + __ bind(&aligned); + // Store the filler at the end of the allocated memory. 
+ __ subi(r30, r30, Operand(kPointerSize)); + __ StorePX(ip, MemOperand(r9, r30)); + + __ bind(&aligned_done); + + // Set destination FixedDoubleArray's length and map. + __ LoadRoot(r22, Heap::kFixedDoubleArrayMapRootIndex); + __ StoreP(r8, MemOperand(r9, FixedDoubleArray::kLengthOffset)); + // Update receiver's map. + __ StoreP(r22, MemOperand(r9, HeapObject::kMapOffset)); + + __ StoreP(r6, FieldMemOperand(r5, HeapObject::kMapOffset), r0); + __ RecordWriteField(r5, + HeapObject::kMapOffset, + r6, + r22, + kLRHasBeenSaved, + kDontSaveFPRegs, + OMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + // Replace receiver's backing store with newly created FixedDoubleArray. + __ addi(r6, r9, Operand(kHeapObjectTag)); + __ StoreP(r6, FieldMemOperand(r5, JSObject::kElementsOffset), r0); + __ RecordWriteField(r5, + JSObject::kElementsOffset, + r6, + r22, + kLRHasBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + + // Prepare for conversion loop. + __ addi(r6, r7, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ addi(r10, r9, Operand(FixedDoubleArray::kHeaderSize)); + __ SmiToDoubleArrayOffset(r9, r8); + __ add(r9, r10, r9); +#if V8_TARGET_ARCH_PPC64 + __ mov(r7, Operand(kHoleNanInt64)); +#else + __ mov(r7, Operand(kHoleNanLower32)); + __ mov(r8, Operand(kHoleNanUpper32)); +#endif + // r6: begin of source FixedArray element fields, not tagged + // r7: kHoleNanLower32 + // r8: kHoleNanUpper32 + // r9: end of destination FixedDoubleArray, not tagged + // r10: begin of FixedDoubleArray element fields, not tagged + + __ b(&entry); + + __ bind(&only_change_map); + __ StoreP(r6, FieldMemOperand(r5, HeapObject::kMapOffset), r0); + __ RecordWriteField(r5, + HeapObject::kMapOffset, + r6, + r22, + kLRHasBeenSaved, + kDontSaveFPRegs, + OMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + __ b(&done); + + // Call into runtime if GC is required. + __ bind(&gc_required); + __ Pop(r0, r30); + __ mtlr(r0); + __ b(fail); + + // Convert and copy elements. + __ bind(&loop); + __ LoadP(r22, MemOperand(r6)); + __ addi(r6, r6, Operand(kPointerSize)); + // r22: current element + __ UntagAndJumpIfNotSmi(r22, r22, &convert_hole); + + // Normal smi, convert to double and store. + FloatingPointHelper::ConvertIntToDouble( + masm, r22, d0); + __ stfd(d0, MemOperand(r10, 0)); + __ addi(r10, r10, Operand(8)); + + __ b(&entry); + + // Hole found, store the-hole NaN. + __ bind(&convert_hole); + if (FLAG_debug_code) { + // Restore a "smi-untagged" heap object. 
+ __ LoadP(r22, MemOperand(r6, -kPointerSize)); + __ CompareRoot(r22, Heap::kTheHoleValueRootIndex); + __ Assert(eq, "object found in smi-only array"); + } +#if V8_TARGET_ARCH_PPC64 + __ std(r7, MemOperand(r10, 0)); +#else +#if __FLOAT_WORD_ORDER == __LITTLE_ENDIAN + __ stw(r7, MemOperand(r10, 0)); + __ stw(r8, MemOperand(r10, 4)); +#else + __ stw(r8, MemOperand(r10, 0)); + __ stw(r7, MemOperand(r10, 4)); +#endif +#endif + __ addi(r10, r10, Operand(8)); + + __ bind(&entry); + __ cmp(r10, r9); + __ blt(&loop); + + __ Pop(r0, r30); + __ mtlr(r0); + __ bind(&done); +} + + +void ElementsTransitionGenerator::GenerateDoubleToObject( + MacroAssembler* masm, Label* fail) { + // ----------- S t a t e ------------- + // -- r3 : value + // -- r4 : key + // -- r5 : receiver + // -- lr : return address + // -- r6 : target map, scratch for subsequent call + // -- r7 : scratch (elements) + // ----------------------------------- + Label entry, loop, convert_hole, gc_required, only_change_map; + + // Check for empty arrays, which only require a map transition and no changes + // to the backing store. + __ LoadP(r7, FieldMemOperand(r5, JSObject::kElementsOffset)); + __ CompareRoot(r7, Heap::kEmptyFixedArrayRootIndex); + __ beq(&only_change_map); + + __ Push(r6, r5, r4, r3); + __ LoadP(r8, FieldMemOperand(r7, FixedArray::kLengthOffset)); + // r7: source FixedDoubleArray + // r8: number of elements (smi-tagged) + + // Allocate new FixedArray. + __ li(r3, Operand(FixedDoubleArray::kHeaderSize)); + __ SmiToPtrArrayOffset(r0, r8); + __ add(r3, r3, r0); + __ AllocateInNewSpace(r3, r9, r10, r22, &gc_required, NO_ALLOCATION_FLAGS); + // r9: destination FixedArray, not tagged as heap object + // Set destination FixedDoubleArray's length and map. + __ LoadRoot(r22, Heap::kFixedArrayMapRootIndex); + __ StoreP(r8, MemOperand(r9, FixedDoubleArray::kLengthOffset)); + __ StoreP(r22, MemOperand(r9, HeapObject::kMapOffset)); + + // Prepare for conversion loop. + __ addi(r7, r7, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); + __ addi(r6, r9, Operand(FixedArray::kHeaderSize)); + __ addi(r9, r9, Operand(kHeapObjectTag)); + __ SmiToPtrArrayOffset(r8, r8); + __ add(r8, r6, r8); + __ LoadRoot(r10, Heap::kTheHoleValueRootIndex); + __ LoadRoot(r22, Heap::kHeapNumberMapRootIndex); + // Using offsetted addresses in r7 to fully take advantage of post-indexing. + // r6: begin of destination FixedArray element fields, not tagged + // r7: begin of source FixedDoubleArray element fields, not tagged + // r8: end of destination FixedArray, not tagged + // r9: destination FixedArray + // r10: the-hole pointer + // r22: heap number map + __ b(&entry); + + // Call into runtime if GC is required. + __ bind(&gc_required); + __ Pop(r6, r5, r4, r3); + __ b(fail); + + __ bind(&loop); +#if __FLOAT_WORD_ORDER == __LITTLE_ENDIAN + __ lwz(r4, MemOperand(r7, 4)); +#else + __ lwz(r4, MemOperand(r7)); +#endif + __ addi(r7, r7, Operand(8)); + // r4: current element's upper 32 bit + // r7: address of next element's upper 32 bit + __ Cmpi(r4, Operand(kHoleNanUpper32), r0); + __ beq(&convert_hole); + + // Non-hole double, copy value into a heap number. 
+ __ AllocateHeapNumber(r5, r3, r4, r22, &gc_required); + // r5: new heap number +#if V8_TARGET_ARCH_PPC64 + __ ld(r3, MemOperand(r7, -8)); + __ addi(r4, r5, Operand(-1)); // subtract tag for std + __ std(r3, MemOperand(r4, HeapNumber::kValueOffset)); +#else +#if __FLOAT_WORD_ORDER == __LITTLE_ENDIAN + __ lwz(r3, MemOperand(r7, -8)); + __ lwz(r4, MemOperand(r7, -4)); + __ stw(r3, FieldMemOperand(r5, HeapNumber::kValueOffset)); + __ stw(r4, FieldMemOperand(r5, HeapNumber::kValueOffset+4)); +#else + __ lwz(r3, MemOperand(r7, -4)); + __ lwz(r4, MemOperand(r7, -8)); + __ stw(r3, FieldMemOperand(r5, HeapNumber::kValueOffset+4)); + __ stw(r4, FieldMemOperand(r5, HeapNumber::kValueOffset)); +#endif +#endif + __ mr(r3, r6); + __ StoreP(r5, MemOperand(r6)); + __ addi(r6, r6, Operand(kPointerSize)); + __ RecordWrite(r9, + r3, + r5, + kLRHasNotBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + __ b(&entry); + + // Replace the-hole NaN with the-hole pointer. + __ bind(&convert_hole); + __ StoreP(r10, MemOperand(r6)); + __ addi(r6, r6, Operand(kPointerSize)); + + __ bind(&entry); + __ cmpl(r6, r8); + __ blt(&loop); + + __ Pop(r6, r5, r4, r3); + // Replace receiver's backing store with newly created and filled FixedArray. + __ StoreP(r9, FieldMemOperand(r5, JSObject::kElementsOffset), r0); + __ RecordWriteField(r5, + JSObject::kElementsOffset, + r9, + r22, + kLRHasNotBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + + __ bind(&only_change_map); + // Update receiver's map. + __ StoreP(r6, FieldMemOperand(r5, HeapObject::kMapOffset), r0); + __ RecordWriteField(r5, + HeapObject::kMapOffset, + r6, + r22, + kLRHasNotBeenSaved, + kDontSaveFPRegs, + OMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); +} + + +// roohack - assume ip can be used as a scratch register below +void StringCharLoadGenerator::Generate(MacroAssembler* masm, + Register string, + Register index, + Register result, + Label* call_runtime) { + // Fetch the instance type of the receiver into result register. + __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset)); + __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset)); + + // We need special handling for indirect strings. + Label check_sequential; + __ andi(r0, result, Operand(kIsIndirectStringMask)); + __ beq(&check_sequential, cr0); + + // Dispatch on the indirect string shape: slice or cons. + Label cons_string; + __ mov(ip, Operand(kSlicedNotConsMask)); + __ and_(r0, result, ip, SetRC); + __ beq(&cons_string, cr0); + + // Handle slices. + Label indirect_string_loaded; + __ LoadP(result, FieldMemOperand(string, SlicedString::kOffsetOffset)); + __ LoadP(string, FieldMemOperand(string, SlicedString::kParentOffset)); + __ SmiUntag(ip, result); + __ add(index, index, ip); + __ b(&indirect_string_loaded); + + // Handle cons strings. + // Check whether the right hand side is the empty string (i.e. if + // this is really a flat string in a cons string). If that is not + // the case we would rather go to the runtime system now to flatten + // the string. + __ bind(&cons_string); + __ LoadP(result, FieldMemOperand(string, ConsString::kSecondOffset)); + __ CompareRoot(result, Heap::kEmptyStringRootIndex); + __ bne(call_runtime); + // Get the first of the two strings and load its instance type. 
+ __ LoadP(string, FieldMemOperand(string, ConsString::kFirstOffset)); + + __ bind(&indirect_string_loaded); + __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset)); + __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset)); + + // Distinguish sequential and external strings. Only these two string + // representations can reach here (slices and flat cons strings have been + // reduced to the underlying sequential or external string). + Label external_string, check_encoding; + __ bind(&check_sequential); + STATIC_ASSERT(kSeqStringTag == 0); + __ andi(r0, result, Operand(kStringRepresentationMask)); + __ bne(&external_string, cr0); + + // Prepare sequential strings + STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize); + __ addi(string, + string, + Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + __ b(&check_encoding); + + // Handle external strings. + __ bind(&external_string); + if (FLAG_debug_code) { + // Assert that we do not have a cons or slice (indirect strings) here. + // Sequential strings have already been ruled out. + __ andi(r0, result, Operand(kIsIndirectStringMask)); + __ Assert(eq, "external string expected, but not found", cr0); + } + // Rule out short external strings. + STATIC_CHECK(kShortExternalStringTag != 0); + __ andi(r0, result, Operand(kShortExternalStringMask)); + __ bne(call_runtime, cr0); + __ LoadP(string, + FieldMemOperand(string, ExternalString::kResourceDataOffset)); + + Label ascii, done; + __ bind(&check_encoding); + STATIC_ASSERT(kTwoByteStringTag == 0); + __ andi(r0, result, Operand(kStringEncodingMask)); + __ bne(&ascii, cr0); + // Two-byte string. + __ ShiftLeftImm(result, index, Operand(1)); + __ lhzx(result, MemOperand(string, result)); + __ b(&done); + __ bind(&ascii); + // Ascii string. + __ lbzx(result, MemOperand(string, index)); + __ bind(&done); +} + +#undef __ + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_PPC diff --git a/src/ppc/codegen-ppc.h b/src/ppc/codegen-ppc.h new file mode 100644 index 0000000..379966c --- /dev/null +++ b/src/ppc/codegen-ppc.h @@ -0,0 +1,96 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// +// Copyright IBM Corp. 2012, 2013. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_PPC_CODEGEN_PPC_H_ +#define V8_PPC_CODEGEN_PPC_H_ + +#include "ast.h" +#include "ic-inl.h" + +namespace v8 { +namespace internal { + +// Forward declarations +class CompilationInfo; + +enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF }; + +// ------------------------------------------------------------------------- +// CodeGenerator + +class CodeGenerator: public AstVisitor { + public: + static bool MakeCode(CompilationInfo* info); + + // Printing of AST, etc. as requested by flags. + static void MakeCodePrologue(CompilationInfo* info); + + // Allocate and install the code. + static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm, + Code::Flags flags, + CompilationInfo* info); + + // Print the code after compiling it. + static void PrintCode(Handle<Code> code, CompilationInfo* info); + + static bool ShouldGenerateLog(Expression* type); + + static void SetFunctionInfo(Handle<SharedFunctionInfo> fun, + FunctionLiteral* lit, + bool is_toplevel, + Handle