LCOV - code coverage report
Current view: top level - js/src/vm - Stopwatch.cpp (source / functions)
Test: output.info
Date: 2017-07-14 16:53:18
                      Hit    Total    Coverage
    Lines:             34      255      13.3 %
    Functions:          8       53      15.1 %

          Line data    Source code
       1             : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
       2             :  * vim: set ts=8 sts=4 et sw=4 tw=99:
       3             :  * This Source Code Form is subject to the terms of the Mozilla Public
       4             :  * License, v. 2.0. If a copy of the MPL was not distributed with this
       5             :  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
       6             : 
       7             : #include "vm/Stopwatch.h"
       8             : 
       9             : #include "mozilla/ArrayUtils.h"
      10             : #include "mozilla/IntegerTypeTraits.h"
      11             : #include "mozilla/Unused.h"
      12             : 
      13             : #if defined(XP_WIN)
      14             : #include <processthreadsapi.h>
      15             : #endif // defined(XP_WIN)
      16             : 
      17             : #include "jscompartment.h"
      18             : #include "jswin.h"
      19             : 
      20             : #include "gc/Zone.h"
      21             : #include "vm/Runtime.h"
      22             : 
      23             : 
      24             : namespace js {
      25             : 
      26             : bool
      27           0 : PerformanceMonitoring::addRecentGroup(PerformanceGroup* group)
      28             : {
      29           0 :     if (group->isUsedInThisIteration())
      30           0 :         return true;
      31             : 
      32           0 :     group->setIsUsedInThisIteration(true);
      33           0 :     return recentGroups_.append(group);
      34             : }
      35             : 
      36             : void
      37        1230 : PerformanceMonitoring::reset()
      38             : {
      39             :     // All ongoing measures are dependent on the current iteration#.
      40             :     // By incrementing it, we mark all data as stale. Stale data will
      41             :     // be overwritten progressively during the execution.
      42        1230 :     ++iteration_;
      43        1230 :     recentGroups_.clear();
      44             : 
      45             :     // Every so often, we will be rescheduled to another CPU. If this
      46             :     // happens, we may end up with an entirely unsynchronized
      47             :     // timestamp counter. If we do not reset
      48             :     // `highestTimestampCounter_`, we could end up ignoring entirely
      49             :     // valid sets of measures just because we are on a CPU that has a
      50             :     // lower RDTSC.
      51        1230 :     highestTimestampCounter_ = 0;
      52        1230 : }
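
The `++iteration_` above is a generation counter: rather than walking every PerformanceGroup to clear its per-iteration data, the monitor bumps one number and anything stamped with an older iteration is treated as stale and discarded lazily (see `PerformanceGroup::acquire()` further down). A minimal standalone sketch of that pattern, with illustrative names that are not part of this file:

    #include <cstdint>

    struct Record {
        uint64_t iteration = 0;  // generation stamp
        uint64_t cycles = 0;     // per-iteration data
    };

    struct Monitor {
        uint64_t iteration = 0;

        void reset() {
            ++iteration;  // O(1): every Record instantly becomes stale
        }

        void addCycles(Record& r, uint64_t delta) {
            if (r.iteration != iteration) {
                r.cycles = 0;             // lazily drop data from an older iteration
                r.iteration = iteration;  // re-stamp for the current iteration
            }
            r.cycles += delta;
        }
    };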
      53             : 
      54             : void
      55           0 : PerformanceMonitoring::start()
      56             : {
      57           0 :     if (!isMonitoringJank_)
      58           0 :         return;
      59             : 
      60           0 :     if (iteration_ == startedAtIteration_) {
      61             :         // The stopwatch is already started for this iteration.
      62           0 :         return;
      63             :     }
      64             : 
      65           0 :     startedAtIteration_ = iteration_;
      66           0 :     if (stopwatchStartCallback)
      67           0 :         stopwatchStartCallback(iteration_, stopwatchStartClosure);
      68             : }
      69             : 
      70             : // Commit the data that has been collected during the iteration
      71             : // into the actual `PerformanceData`.
      72             : //
      73             : // We use the proportion of cycles-spent-in-group over
      74             : // cycles-spent-in-toplevel-group as an approximation to allocate
      75             : // system (kernel) time and user (CPU) time to each group. Note
      76             : // that cycles are not an exact measure:
      77             : //
      78             : // 1. if the computer has gone to sleep, the clock may be reset to 0;
      79             : // 2. if the process is moved between CPUs/cores, it may end up on a CPU
      80             : //    or core with an unsynchronized clock;
      81             : // 3. the mapping between clock cycles and walltime varies with the current
      82             : //    frequency of the CPU;
      83             : // 4. other threads/processes using the same CPU will also increment
      84             : //    the counter.
      85             : //
      86             : // ** Effect of 1. (computer going to sleep)
      87             : //
      88             : // We assume that this will happen very seldom. Since the final numbers
      89             : // are bounded by the CPU time and Kernel time reported by `getresources`,
      90             : // the effect will be contained to a single iteration of the event loop.
      91             : //
      92             : // ** Effect of 2. (moving between CPUs/cores)
      93             : //
      94             : // On platforms that support it, we only measure the number of cycles
      95             : // if we start and end execution of a group on the same
      96             : // CPU/core. While there is a small window (a few cycles) during which
      97             : // the thread can be migrated without us noticing, we expect that this
      98             : // will happen rarely enough that this won't affect the statistics
      99             : // meaningfully.
     100             : //
     101             : // On other platforms, assuming that the probability of jumping
     102             : // between CPUs/cores during a given (real) cycle is constant, and
     103             : // that the distribution of differences between clocks is even, the
     104             : // probability that the number of cycles reported by a measure is
     105             : // modified by X cycles should be a gaussian distribution, with groups
     106             : // with longer execution having a larger amplitude than groups with
     107             : // shorter execution. Since we discard measures that result in a
     108             : // negative number of cycles, this distribution is actually skewed
     109             : // towards over-estimating the number of cycles of groups that already
      110             : // have many cycles and under-estimating the number of cycles of
      111             : // groups that already have fewer cycles.
     112             : //
     113             : // Since the final numbers are bounded by the CPU time and Kernel time
     114             : // reported by `getresources`, we accept this bias.
     115             : //
     116             : // ** Effect of 3. (mapping between clock cycles and walltime)
     117             : //
     118             : // Assuming that this is evenly distributed, we expect that this will
     119             : // eventually balance out.
     120             : //
     121             : // ** Effect of 4. (cycles increase with system activity)
     122             : //
     123             : // Assuming that, within an iteration of the event loop, this happens
      124             : // uniformly over time, this will skew towards over-estimating the number
      125             : // of cycles of groups that already have many cycles and under-estimating
      126             : // the number of cycles of groups that already have fewer cycles.
     127             : //
     128             : // Since the final numbers are bounded by the CPU time and Kernel time
     129             : // reported by `getresources`, we accept this bias.
     130             : //
     131             : // ** Big picture
     132             : //
     133             : // Computing the number of cycles is fast and should be accurate
     134             : // enough in practice. Alternatives (such as calling `getresources`
     135             : // all the time or sampling from another thread) are very expensive
     136             : // in system calls and/or battery and not necessarily more accurate.
     137             : bool
     138        1227 : PerformanceMonitoring::commit()
     139             : {
      140             :     // Maximal initialization size, in elements, for the vector of groups.
     141             :     static const size_t MAX_GROUPS_INIT_CAPACITY = 1024;
     142             : 
     143             : #if !defined(MOZ_HAVE_RDTSC)
     144             :     // The AutoStopwatch is only executed if `MOZ_HAVE_RDTSC`.
     145             :     return false;
     146             : #endif // !defined(MOZ_HAVE_RDTSC)
     147             : 
     148        1227 :     if (!isMonitoringJank_) {
     149             :         // Either we have not started monitoring or monitoring has
     150             :         // been cancelled during the iteration.
     151        1227 :         return true;
     152             :     }
     153             : 
     154           0 :     if (startedAtIteration_ != iteration_) {
     155             :         // No JS code has been monitored during this iteration.
     156           0 :         return true;
     157             :     }
     158             : 
     159             :     // The move operation is generally constant time, unless
     160             :     // `recentGroups_.length()` is very small, in which case
     161             :     // it's fast just because it's small.
     162           0 :     PerformanceGroupVector recentGroups(Move(recentGroups_));
     163           0 :     recentGroups_ = PerformanceGroupVector(); // Reconstruct after `Move`.
     164             : 
     165           0 :     bool success = true;
     166           0 :     if (stopwatchCommitCallback)
     167           0 :         success = stopwatchCommitCallback(iteration_, recentGroups, stopwatchCommitClosure);
     168             : 
     169             :     // Heuristic: we expect to have roughly the same number of groups as in
     170             :     // the previous iteration.
     171           0 :     const size_t capacity = std::min(recentGroups.capacity(), MAX_GROUPS_INIT_CAPACITY);
     172           0 :     success = recentGroups_.reserve(capacity)
     173           0 :             && success;
     174             : 
     175             :     // Reset immediately, to make sure that we're not hit by the end
     176             :     // of a nested event loop (which would cause `commit` to be called
     177             :     // twice in succession).
     178           0 :     reset();
     179           0 :     return success;
     180             : }
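
The comment block above `commit()` describes the key approximation: the embedding splits the user and kernel time reported by the OS for the whole iteration across groups, in proportion to cycles-spent-in-group over cycles-spent-in-toplevel-group, which is why all of the listed biases stay bounded by the OS totals. A minimal sketch of that arithmetic, outside SpiderMonkey; the function and parameter names are illustrative, not the embedding's actual callback:

    #include <cstdint>

    struct ApproxTimes {
        uint64_t userUs;    // user time attributed to the group, in microseconds
        uint64_t kernelUs;  // kernel time attributed to the group, in microseconds
    };

    // Split the OS-reported totals for one event-loop iteration in proportion
    // to the cycles measured for a group versus the top-level group.
    ApproxTimes approximateGroupTimes(uint64_t groupCycles,
                                      uint64_t topLevelCycles,
                                      uint64_t totalUserUs,
                                      uint64_t totalKernelUs)
    {
        ApproxTimes result = { 0, 0 };
        if (topLevelCycles == 0)
            return result;                 // nothing was measured this iteration
        if (groupCycles > topLevelCycles)
            groupCycles = topLevelCycles;  // clamp: a group never exceeds the total
        result.userUs   = totalUserUs   * groupCycles / topLevelCycles;
        result.kernelUs = totalKernelUs * groupCycles / topLevelCycles;
        return result;
    }

However skewed the cycle counts are, the attributed times can never exceed what the OS reported, which is the bound the comment relies on.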
     181             : 
     182             : uint64_t
     183           0 : PerformanceMonitoring::monotonicReadTimestampCounter()
     184             : {
     185             : #if defined(MOZ_HAVE_RDTSC)
     186           0 :     const uint64_t hardware = ReadTimestampCounter();
     187           0 :     if (highestTimestampCounter_ < hardware)
     188           0 :         highestTimestampCounter_ = hardware;
     189           0 :     return highestTimestampCounter_;
     190             : #else
     191             :     return 0;
     192             : #endif // defined(MOZ_HAVE_RDTSC)
     193             : }
     194             : 
     195             : void
     196           0 : PerformanceMonitoring::dispose(JSRuntime* rt)
     197             : {
     198           0 :     reset();
     199           0 :     for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
     200           0 :         c->performanceMonitoring.unlink();
     201             :     }
     202           0 : }
     203             : 
     204           0 : PerformanceGroupHolder::~PerformanceGroupHolder()
     205             : {
     206           0 :     unlink();
     207           0 : }
     208             : 
     209             : void
     210         595 : PerformanceGroupHolder::unlink()
     211             : {
     212         595 :     initialized_ = false;
     213         595 :     groups_.clear();
     214         595 : }
     215             : 
     216             : const PerformanceGroupVector*
     217       17386 : PerformanceGroupHolder::getGroups(JSContext* cx)
     218             : {
     219       17386 :     if (initialized_)
     220           0 :         return &groups_;
     221             : 
     222       17386 :     if (!runtime_->performanceMonitoring().getGroupsCallback)
     223       17386 :         return nullptr;
     224             : 
     225           0 :     if (!runtime_->performanceMonitoring().getGroupsCallback(cx, groups_, runtime_->performanceMonitoring().getGroupsClosure))
     226           0 :         return nullptr;
     227             : 
     228           0 :     initialized_ = true;
     229           0 :     return &groups_;
     230             : }
     231             : 
     232       17386 : AutoStopwatch::AutoStopwatch(JSContext* cx MOZ_GUARD_OBJECT_NOTIFIER_PARAM_IN_IMPL)
     233             :   : cx_(cx)
     234             :   , iteration_(0)
     235             :   , isMonitoringJank_(false)
     236             :   , isMonitoringCPOW_(false)
     237             :   , cyclesStart_(0)
     238       17386 :   , CPOWTimeStart_(0)
     239             : {
     240       17386 :     MOZ_GUARD_OBJECT_NOTIFIER_INIT;
     241             : 
     242       17386 :     JSCompartment* compartment = cx_->compartment();
     243       17386 :     if (MOZ_UNLIKELY(compartment->scheduledForDestruction))
     244           0 :         return;
     245             : 
     246       17386 :     JSRuntime* runtime = cx_->runtime();
     247       17386 :     iteration_ = runtime->performanceMonitoring().iteration();
     248             : 
     249       17386 :     const PerformanceGroupVector* groups = compartment->performanceMonitoring.getGroups(cx);
     250       17386 :     if (!groups) {
     251             :       // Either the embedding has not provided any performance
     252             :       // monitoring logistics or there was an error that prevents
     253             :       // performance monitoring.
     254       17386 :       return;
     255             :     }
     256           0 :     for (auto group = groups->begin(); group < groups->end(); group++) {
     257           0 :       auto acquired = acquireGroup(*group);
     258           0 :       if (acquired) {
     259           0 :           if (!groups_.append(acquired))
     260           0 :               MOZ_CRASH();
     261             :       }
     262             :     }
     263           0 :     if (groups_.length() == 0) {
     264             :       // We are not in charge of monitoring anything.
     265           0 :       return;
     266             :     }
     267             : 
     268             :     // Now that we are sure that JS code is being executed,
     269             :     // initialize the stopwatch for this iteration, lazily.
     270           0 :     runtime->performanceMonitoring().start();
     271           0 :     enter();
     272             : }
     273             : 
     274       34758 : AutoStopwatch::~AutoStopwatch()
     275             : {
     276       17379 :     if (groups_.length() == 0) {
     277             :         // We are not in charge of monitoring anything.
     278       17379 :         return;
     279             :     }
     280             : 
     281           0 :     JSCompartment* compartment = cx_->compartment();
     282           0 :     if (MOZ_UNLIKELY(compartment->scheduledForDestruction))
     283           0 :         return;
     284             : 
     285           0 :     JSRuntime* runtime = cx_->runtime();
     286           0 :     if (MOZ_UNLIKELY(iteration_ != runtime->performanceMonitoring().iteration())) {
     287             :         // We have entered a nested event loop at some point.
     288             :         // Any information we may have is obsolete.
     289           0 :         return;
     290             :     }
     291             : 
     292           0 :     mozilla::Unused << exit(); // Sadly, there is nothing we can do about an error at this point.
     293             : 
     294           0 :     for (auto group = groups_.begin(); group < groups_.end(); group++)
     295           0 :         releaseGroup(*group);
     296       17379 : }
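
AutoStopwatch is an RAII measurement scope: the constructor acquires the compartment's performance groups and calls `enter()`, and the destructor calls `exit()` and releases the groups, so a call site only needs to keep one alive on the stack while JS runs. A hedged sketch of such a call site; the real one lives in the interpreter, not in this file, and `runScriptSomehow` is a hypothetical placeholder:

    #include "vm/Stopwatch.h"

    void runScriptSomehow(JSContext* cx);  // hypothetical placeholder

    void executeWithMonitoring(JSContext* cx)
    {
        js::AutoStopwatch stopwatch(cx);  // enter() runs here if monitoring is active

        runScriptSomehow(cx);             // JS executes inside the measured scope

        // When `stopwatch` goes out of scope, exit() attributes the elapsed
        // cycles and CPOW time to the acquired performance groups.
    }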
     297             : 
     298             : void
     299           0 : AutoStopwatch::enter()
     300             : {
     301           0 :     JSRuntime* runtime = cx_->runtime();
     302             : 
     303           0 :     if (runtime->performanceMonitoring().isMonitoringCPOW()) {
     304           0 :         CPOWTimeStart_ = runtime->performanceMonitoring().totalCPOWTime;
     305           0 :         isMonitoringCPOW_ = true;
     306             :     }
     307             : 
     308           0 :     if (runtime->performanceMonitoring().isMonitoringJank()) {
     309           0 :         cyclesStart_ = this->getCycles(runtime);
     310           0 :         cpuStart_ = this->getCPU();
     311           0 :         isMonitoringJank_ = true;
     312             :     }
     313           0 : }
     314             : 
     315             : bool
     316           0 : AutoStopwatch::exit()
     317             : {
     318           0 :     JSRuntime* runtime = cx_->runtime();
     319             : 
     320           0 :     uint64_t cyclesDelta = 0;
     321           0 :     if (isMonitoringJank_ && runtime->performanceMonitoring().isMonitoringJank()) {
     322             :         // We were monitoring jank when we entered and we still are.
     323             : 
     324             :         // If possible, discard results when we don't end on the
     325             :         // same CPU as we started.  Note that we can be
      326             :         // rescheduled to another CPU between `getCycles()` and
     327             :         // `getCPU()`.  We hope that this will happen rarely
     328             :         // enough that the impact on our statistics will remain
     329             :         // limited.
     330           0 :         const cpuid_t cpuEnd = this->getCPU();
     331           0 :         if (isSameCPU(cpuStart_, cpuEnd)) {
     332           0 :             const uint64_t cyclesEnd = getCycles(runtime);
     333           0 :             cyclesDelta = cyclesEnd - cyclesStart_; // Always >= 0 by definition of `getCycles`.
     334             :         }
      335             : // Temporarily disable untested code path.
     336             : #if 0 // WINVER >= 0x600
     337             :         updateTelemetry(cpuStart_, cpuEnd);
     338             : #elif defined(__linux__)
     339           0 :         updateTelemetry(cpuStart_, cpuEnd);
      340             : #endif // WINVER >= 0x600 || __linux__
     341             :     }
     342             : 
     343           0 :     uint64_t CPOWTimeDelta = 0;
     344           0 :     if (isMonitoringCPOW_ && runtime->performanceMonitoring().isMonitoringCPOW()) {
     345             :         // We were monitoring CPOW when we entered and we still are.
     346           0 :         const uint64_t CPOWTimeEnd = runtime->performanceMonitoring().totalCPOWTime;
     347           0 :         CPOWTimeDelta = getDelta(CPOWTimeEnd, CPOWTimeStart_);
     348             :     }
     349           0 :     return addToGroups(cyclesDelta, CPOWTimeDelta);
     350             : }
     351             : 
     352             : void
     353           0 : AutoStopwatch::updateTelemetry(const cpuid_t& cpuStart_, const cpuid_t& cpuEnd)
     354             : {
      355           0 :     JSRuntime* runtime = cx_->runtime();
     356             : 
     357           0 :     if (isSameCPU(cpuStart_, cpuEnd))
     358           0 :         runtime->performanceMonitoring().testCpuRescheduling.stayed += 1;
     359             :     else
     360           0 :         runtime->performanceMonitoring().testCpuRescheduling.moved += 1;
     361           0 : }
     362             : 
     363             : PerformanceGroup*
     364           0 : AutoStopwatch::acquireGroup(PerformanceGroup* group)
     365             : {
     366           0 :     MOZ_ASSERT(group);
     367             : 
     368           0 :     if (group->isAcquired(iteration_))
     369           0 :         return nullptr;
     370             : 
     371           0 :     if (!group->isActive())
     372           0 :         return nullptr;
     373             : 
     374           0 :     group->acquire(iteration_, this);
     375           0 :     return group;
     376             : }
     377             : 
     378             : void
     379           0 : AutoStopwatch::releaseGroup(PerformanceGroup* group)
     380             : {
     381           0 :     MOZ_ASSERT(group);
      382           0 :     group->release(iteration_, this);
     383           0 : }
     384             : 
     385             : bool
     386           0 : AutoStopwatch::addToGroups(uint64_t cyclesDelta, uint64_t CPOWTimeDelta)
     387             : {
      388           0 :     JSRuntime* runtime = cx_->runtime();
     389             : 
     390           0 :     for (auto group = groups_.begin(); group < groups_.end(); ++group) {
     391           0 :       if (!addToGroup(runtime, cyclesDelta, CPOWTimeDelta, *group))
     392           0 :         return false;
     393             :     }
     394           0 :     return true;
     395             : }
     396             : 
     397             : bool
     398           0 : AutoStopwatch::addToGroup(JSRuntime* runtime, uint64_t cyclesDelta, uint64_t CPOWTimeDelta, PerformanceGroup* group)
     399             : {
     400           0 :     MOZ_ASSERT(group);
     401           0 :     MOZ_ASSERT(group->isAcquired(iteration_, this));
     402             : 
     403           0 :     if (!runtime->performanceMonitoring().addRecentGroup(group))
     404           0 :       return false;
     405           0 :     group->addRecentTicks(iteration_, 1);
     406           0 :     group->addRecentCycles(iteration_, cyclesDelta);
     407           0 :     group->addRecentCPOW(iteration_, CPOWTimeDelta);
     408           0 :     return true;
     409             : }
     410             : 
     411             : uint64_t
     412           0 : AutoStopwatch::getDelta(const uint64_t end, const uint64_t start) const
     413             : {
     414           0 :     if (start >= end)
     415           0 :       return 0;
     416           0 :     return end - start;
     417             : }
     418             : 
     419             : uint64_t
     420           0 : AutoStopwatch::getCycles(JSRuntime* runtime) const
     421             : {
     422           0 :     return runtime->performanceMonitoring().monotonicReadTimestampCounter();
     423             : }
     424             : 
     425             : cpuid_t inline
     426           0 : AutoStopwatch::getCPU() const
     427             : {
      428             : // Temporarily disable untested code path.
     429             : #if 0 // defined(XP_WIN) && WINVER >= _WIN32_WINNT_VISTA
     430             :     PROCESSOR_NUMBER proc;
     431             :     GetCurrentProcessorNumberEx(&proc);
     432             : 
     433             :     cpuid_t result(proc.Group, proc.Number);
     434             :     return result;
     435             : #else
     436           0 :     return {};
      437             : #endif // defined(XP_WIN) && WINVER >= _WIN32_WINNT_VISTA
     438             : }
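
On every platform currently compiled, the method above returns a default-constructed cpuid_t, so `isSameCPU()` below is trivially true and, on the Linux path, `updateTelemetry()` counts every sample as "stayed". If a Linux implementation were wanted, `sched_getcpu(3)` could supply the processor number; a hedged sketch, not part of the current file, assuming `cpuid_t` keeps the two-argument (group, number) constructor used in the disabled Windows path:

    #if defined(__linux__)
    #include <sched.h>  // sched_getcpu(), a glibc extension (needs _GNU_SOURCE)

    cpuid_t inline
    AutoStopwatch::getCPU() const
    {
        const int cpu = sched_getcpu();
        if (cpu < 0)
            return {};           // fall back to "unknown CPU" on failure
        return cpuid_t(0, cpu);  // no processor groups on Linux: group 0
    }
    #endif // defined(__linux__)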
     439             : 
     440             : bool inline
     441           0 : AutoStopwatch::isSameCPU(const cpuid_t& a, const cpuid_t& b) const
     442             : {
      443             : // Temporarily disable untested code path.
     444             : #if 0 // defined(XP_WIN)  && WINVER >= _WIN32_WINNT_VISTA
     445             :     return a.group_ == b.group_ && a.number_ == b.number_;
     446             : #else
     447           0 :     return true;
     448             : #endif
     449             : }
     450             : 
     451           0 : PerformanceGroup::PerformanceGroup()
     452             :     : recentCycles_(0)
     453             :     , recentTicks_(0)
     454             :     , recentCPOW_(0)
     455             :     , iteration_(0)
     456             :     , isActive_(false)
     457             :     , isUsedInThisIteration_(false)
     458             :     , owner_(nullptr)
     459           0 :     , refCount_(0)
     460           0 : { }
     461             : 
     462             : uint64_t
     463           0 : PerformanceGroup::iteration() const
     464             : {
     465           0 :     return iteration_;
     466             : }
     467             : 
     468             : 
     469             : bool
     470           0 : PerformanceGroup::isAcquired(uint64_t it) const
     471             : {
     472           0 :     return owner_ != nullptr && iteration_ == it;
     473             : }
     474             : 
     475             : bool
     476           0 : PerformanceGroup::isAcquired(uint64_t it, const AutoStopwatch* owner) const
     477             : {
     478           0 :     return owner_ == owner && iteration_ == it;
     479             : }
     480             : 
     481             : void
     482           0 : PerformanceGroup::acquire(uint64_t it, const AutoStopwatch* owner)
     483             : {
     484           0 :     if (iteration_ != it) {
     485             :         // Any data that pretends to be recent is actually bound
     486             :         // to an older iteration and therefore stale.
     487           0 :         resetRecentData();
     488             :     }
     489           0 :     iteration_ = it;
     490           0 :     owner_ = owner;
     491           0 : }
     492             : 
     493             : void
     494           0 : PerformanceGroup::release(uint64_t it, const AutoStopwatch* owner)
     495             : {
     496           0 :     if (iteration_ != it)
     497           0 :         return;
     498             : 
     499           0 :     MOZ_ASSERT(owner == owner_ || owner_ == nullptr);
     500           0 :     owner_ = nullptr;
     501             : }
     502             : 
     503             : void
     504           0 : PerformanceGroup::resetRecentData()
     505             : {
     506           0 :     recentCycles_ = 0;
     507           0 :     recentTicks_ = 0;
     508           0 :     recentCPOW_ = 0;
     509           0 :     isUsedInThisIteration_ = false;
     510           0 : }
     511             : 
     512             : 
     513             : uint64_t
     514           0 : PerformanceGroup::recentCycles(uint64_t iteration) const
     515             : {
     516           0 :     MOZ_ASSERT(iteration == iteration_);
     517           0 :     return recentCycles_;
     518             : }
     519             : 
     520             : void
     521           0 : PerformanceGroup::addRecentCycles(uint64_t iteration, uint64_t cycles)
     522             : {
     523           0 :     MOZ_ASSERT(iteration == iteration_);
     524           0 :     recentCycles_ += cycles;
     525           0 : }
     526             : 
     527             : uint64_t
     528           0 : PerformanceGroup::recentTicks(uint64_t iteration) const
     529             : {
     530           0 :     MOZ_ASSERT(iteration == iteration_);
     531           0 :     return recentTicks_;
     532             : }
     533             : 
     534             : void
     535           0 : PerformanceGroup::addRecentTicks(uint64_t iteration, uint64_t ticks)
     536             : {
     537           0 :     MOZ_ASSERT(iteration == iteration_);
     538           0 :     recentTicks_ += ticks;
     539           0 : }
     540             : 
     541             : 
     542             : uint64_t
     543           0 : PerformanceGroup::recentCPOW(uint64_t iteration) const
     544             : {
     545           0 :     MOZ_ASSERT(iteration == iteration_);
     546           0 :     return recentCPOW_;
     547             : }
     548             : 
     549             : void
     550           0 : PerformanceGroup::addRecentCPOW(uint64_t iteration, uint64_t CPOW)
     551             : {
     552           0 :     MOZ_ASSERT(iteration == iteration_);
     553           0 :     recentCPOW_ += CPOW;
     554           0 : }
     555             : 
     556             : 
     557             : bool
     558           0 : PerformanceGroup::isActive() const
     559             : {
     560           0 :     return isActive_;
     561             : }
     562             : 
     563             : void
     564           0 : PerformanceGroup::setIsActive(bool value)
     565             : {
      566           0 :     isActive_ = value;
     567           0 : }
     568             : 
     569             : void
     570           0 : PerformanceGroup::setIsUsedInThisIteration(bool value)
     571             : {
      572           0 :     isUsedInThisIteration_ = value;
     573           0 : }
     574             : bool
     575           0 : PerformanceGroup::isUsedInThisIteration() const
     576             : {
      577           0 :     return isUsedInThisIteration_;
     578             : }
     579             : 
     580             : void
     581           0 : PerformanceGroup::AddRef()
     582             : {
     583           0 :     ++refCount_;
     584           0 : }
     585             : 
     586             : void
     587           0 : PerformanceGroup::Release()
     588             : {
     589           0 :     MOZ_ASSERT(refCount_ > 0);
     590           0 :     --refCount_;
     591           0 :     if (refCount_ > 0)
     592           0 :         return;
     593             : 
     594           0 :     this->Delete();
     595             : }
     596             : 
     597             : JS_PUBLIC_API(bool)
     598           0 : SetStopwatchStartCallback(JSContext* cx, StopwatchStartCallback cb, void* closure)
     599             : {
     600           0 :     cx->runtime()->performanceMonitoring().setStopwatchStartCallback(cb, closure);
     601           0 :     return true;
     602             : }
     603             : 
     604             : JS_PUBLIC_API(bool)
     605           0 : SetStopwatchCommitCallback(JSContext* cx, StopwatchCommitCallback cb, void* closure)
     606             : {
     607           0 :     cx->runtime()->performanceMonitoring().setStopwatchCommitCallback(cb, closure);
     608           0 :     return true;
     609             : }
     610             : 
     611             : JS_PUBLIC_API(bool)
     612           0 : SetGetPerformanceGroupsCallback(JSContext* cx, GetGroupsCallback cb, void* closure)
     613             : {
     614           0 :     cx->runtime()->performanceMonitoring().setGetGroupsCallback(cb, closure);
     615           0 :     return true;
     616             : }
     617             : 
     618             : JS_PUBLIC_API(bool)
     619        1227 : FlushPerformanceMonitoring(JSContext* cx)
     620             : {
     621        1227 :     return cx->runtime()->performanceMonitoring().commit();
     622             : }
     623             : JS_PUBLIC_API(void)
     624        1230 : ResetPerformanceMonitoring(JSContext* cx)
     625             : {
     626        1230 :     return cx->runtime()->performanceMonitoring().reset();
     627             : }
     628             : JS_PUBLIC_API(void)
     629           0 : DisposePerformanceMonitoring(JSContext* cx)
     630             : {
     631           0 :     return cx->runtime()->performanceMonitoring().dispose(cx->runtime());
     632             : }
     633             : 
     634             : JS_PUBLIC_API(bool)
     635           0 : SetStopwatchIsMonitoringJank(JSContext* cx, bool value)
     636             : {
     637           0 :     return cx->runtime()->performanceMonitoring().setIsMonitoringJank(value);
     638             : }
     639             : JS_PUBLIC_API(bool)
     640           0 : GetStopwatchIsMonitoringJank(JSContext* cx)
     641             : {
     642           0 :     return cx->runtime()->performanceMonitoring().isMonitoringJank();
     643             : }
     644             : 
     645             : JS_PUBLIC_API(bool)
     646           0 : SetStopwatchIsMonitoringCPOW(JSContext* cx, bool value)
     647             : {
     648           0 :     return cx->runtime()->performanceMonitoring().setIsMonitoringCPOW(value);
     649             : }
     650             : JS_PUBLIC_API(bool)
     651           0 : GetStopwatchIsMonitoringCPOW(JSContext* cx)
     652             : {
     653           0 :     return cx->runtime()->performanceMonitoring().isMonitoringCPOW();
     654             : }
     655             : 
     656             : JS_PUBLIC_API(void)
     657           0 : GetPerfMonitoringTestCpuRescheduling(JSContext* cx, uint64_t* stayed, uint64_t* moved)
     658             : {
     659           0 :     *stayed = cx->runtime()->performanceMonitoring().testCpuRescheduling.stayed;
     660           0 :     *moved = cx->runtime()->performanceMonitoring().testCpuRescheduling.moved;
     661           0 : }
     662             : 
     663             : JS_PUBLIC_API(void)
     664           0 : AddCPOWPerformanceDelta(JSContext* cx, uint64_t delta)
     665             : {
     666           0 :     cx->runtime()->performanceMonitoring().totalCPOWTime += delta;
     667           0 : }
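
Taken together, these JS_PUBLIC_API entry points form the embedder-facing protocol: register the three callbacks once, switch monitoring on, and then flush at the end of each event-loop iteration (or reset to discard it). A hedged sketch of that wiring; the callback signatures are inferred from the call sites earlier in this file and may not match the real typedefs exactly, and the callback bodies are placeholders:

    // Hypothetical embedder-side callbacks.
    static bool MyStart(uint64_t iteration, void* closure) {
        return true;  // e.g. snapshot process CPU usage at the start of the iteration
    }
    static bool MyCommit(uint64_t iteration, js::PerformanceGroupVector& groups,
                         void* closure) {
        return true;  // e.g. fold each group's recentCycles()/recentCPOW() into totals
    }
    static bool MyGetGroups(JSContext* cx, js::PerformanceGroupVector& out,
                            void* closure) {
        return true;  // e.g. append the PerformanceGroups this compartment belongs to
    }

    void installMonitoring(JSContext* cx) {
        js::SetStopwatchStartCallback(cx, MyStart, nullptr);
        js::SetStopwatchCommitCallback(cx, MyCommit, nullptr);
        js::SetGetPerformanceGroupsCallback(cx, MyGetGroups, nullptr);
        js::SetStopwatchIsMonitoringJank(cx, true);
    }

    void onEventLoopIterationEnd(JSContext* cx) {
        js::FlushPerformanceMonitoring(cx);  // commits the iteration's data; commit() resets internally
    }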
     668             : 
     669             : 
     670             : } // namespace js
     671             : 

Generated by: LCOV version 1.13