Fixed Podfile: added more required libraries

This commit is contained in:
u-e 2018-12-05 12:29:49 +03:00
parent 26451ded18
commit b4236fd75d
13278 changed files with 2589408 additions and 2424 deletions

View File

@@ -0,0 +1,15 @@
package com.esteem;
import com.facebook.react.ReactActivity;
public class MainActivity extends ReactActivity {
/**
* Returns the name of the main component registered from JavaScript.
* This is used to schedule rendering of the component.
*/
@Override
protected String getMainComponentName() {
return "eSteem";
}
}

View File

@@ -0,0 +1,45 @@
package com.esteem;
import android.app.Application;
import com.facebook.react.ReactApplication;
import com.facebook.react.ReactNativeHost;
import com.facebook.react.ReactPackage;
import com.facebook.react.shell.MainReactPackage;
import com.facebook.soloader.SoLoader;
import java.util.Arrays;
import java.util.List;
public class MainApplication extends Application implements ReactApplication {
private final ReactNativeHost mReactNativeHost = new ReactNativeHost(this) {
@Override
public boolean getUseDeveloperSupport() {
return BuildConfig.DEBUG;
}
@Override
protected List<ReactPackage> getPackages() {
return Arrays.<ReactPackage>asList(
new MainReactPackage()
);
}
@Override
protected String getJSMainModuleName() {
return "index";
}
};
@Override
public ReactNativeHost getReactNativeHost() {
return mReactNativeHost;
}
@Override
public void onCreate() {
super.onCreate();
SoLoader.init(this, /* native exopackage */ false);
}
}

View File

@@ -11,11 +11,21 @@ target 'eSteem' do
# Pods for eSteem
pod 'React', :path => '../node_modules/react-native', :subspecs => [
'Core',
'CxxBridge', # Include this for RN >= 0.47
'DevSupport', # Include this to enable In-App Devmenu if RN >= 0.43
'RCTText',
'RCTNetwork',
'RCTWebSocket', # Needed for debugging
'RCTAnimation', # Needed for FlatList and animations running on native UI thread
'RCTImage',
]
pod 'yoga', :path => '../node_modules/react-native/ReactCommon/yoga'
# Third party deps podspec link
pod 'DoubleConversion', :podspec => '../node_modules/react-native/third-party-podspecs/DoubleConversion.podspec'
pod 'glog', :podspec => '../node_modules/react-native/third-party-podspecs/glog.podspec'
pod 'Folly', :podspec => '../node_modules/react-native/third-party-podspecs/Folly.podspec'
platform :ios, '9.0'
pod 'RNImageCropPicker', :path => '../node_modules/react-native-image-crop-picker'

View File

@@ -8,14 +8,49 @@ PODS:
- AppCenter/Core
- AppCenterReactNativeShared (1.10.0):
- AppCenter/Core (= 1.11.0)
- boost-for-react-native (1.63.0)
- DoubleConversion (1.1.6)
- Folly (2016.10.31.00):
- boost-for-react-native
- DoubleConversion
- glog
- glog (0.3.5)
- QBImagePickerController (3.4.0)
- React/Core (0.57.7):
- yoga (= 0.57.7.React)
- React/CxxBridge (0.57.7):
- Folly (= 2016.10.31.00)
- React/Core
- React/cxxreact
- React/cxxreact (0.57.7):
- boost-for-react-native (= 1.63.0)
- Folly (= 2016.10.31.00)
- React/jschelpers
- React/jsinspector
- React/DevSupport (0.57.7):
- React/Core
- React/RCTWebSocket
- React/fishhook (0.57.7)
- React/jschelpers (0.57.7):
- Folly (= 2016.10.31.00)
- React/PrivateDatabase
- React/jsinspector (0.57.7)
- React/PrivateDatabase (0.57.7)
- React/RCTAnimation (0.57.7):
- React/Core
- React/RCTBlob (0.57.7):
- React/Core
- React/RCTImage (0.57.7):
- React/Core
- React/RCTNetwork
- React/RCTNetwork (0.57.7):
- React/Core
- React/RCTText (0.57.7):
- React/Core
- React/RCTWebSocket (0.57.7):
- React/Core
- React/fishhook
- React/RCTBlob
- RNImageCropPicker (0.21.3):
- QBImagePickerController
- React/Core
@@ -28,7 +63,17 @@ DEPENDENCIES:
- AppCenter/Crashes (~> 1.11.0)
- AppCenter/Push (~> 1.11.0)
- AppCenterReactNativeShared (~> 1.10.0)
- DoubleConversion (from `../node_modules/react-native/third-party-podspecs/DoubleConversion.podspec`)
- Folly (from `../node_modules/react-native/third-party-podspecs/Folly.podspec`)
- glog (from `../node_modules/react-native/third-party-podspecs/glog.podspec`)
- React/Core (from `../node_modules/react-native`)
- React/CxxBridge (from `../node_modules/react-native`)
- React/DevSupport (from `../node_modules/react-native`)
- React/RCTAnimation (from `../node_modules/react-native`)
- React/RCTImage (from `../node_modules/react-native`)
- React/RCTNetwork (from `../node_modules/react-native`)
- React/RCTText (from `../node_modules/react-native`)
- React/RCTWebSocket (from `../node_modules/react-native`)
- RNImageCropPicker (from `../node_modules/react-native-image-crop-picker`)
- yoga (from `../node_modules/react-native/ReactCommon/yoga`)
@@ -36,10 +81,17 @@ SPEC REPOS:
https://github.com/cocoapods/specs.git:
- AppCenter
- AppCenterReactNativeShared
- boost-for-react-native
- QBImagePickerController
- RSKImageCropper
EXTERNAL SOURCES:
DoubleConversion:
:podspec: "../node_modules/react-native/third-party-podspecs/DoubleConversion.podspec"
Folly:
:podspec: "../node_modules/react-native/third-party-podspecs/Folly.podspec"
glog:
:podspec: "../node_modules/react-native/third-party-podspecs/glog.podspec"
React:
:path: "../node_modules/react-native"
RNImageCropPicker:
@@ -50,12 +102,16 @@ EXTERNAL SOURCES:
SPEC CHECKSUMS:
AppCenter: 3bccf8d733e337d0db574dd4cb0e33ab9637b7f2
AppCenterReactNativeShared: a77b000c2ac6dc2e44472621d7d0770f196e5822
boost-for-react-native: 39c7adb57c4e60d6c5479dd8623128eb5b3f0f2c
DoubleConversion: bb338842f62ab1d708ceb63ec3d999f0f3d98ecd
Folly: c89ac2d5c6ab169cd7397ef27485c44f35f742c7
glog: e8acf0ebbf99759d3ff18c86c292a5898282dcde
QBImagePickerController: d54cf93db6decf26baf6ed3472f336ef35cae022
React: 1fe0eb13d90b625d94c3b117c274dcfd2e760e11
RNImageCropPicker: 32ca4b9fef4e1b7b85ba69494242122948117e06
RSKImageCropper: 98296ad26b41753f796b6898d015509598f13d97
yoga: b1ce48b6cf950b98deae82838f5173ea7cf89e85
PODFILE CHECKSUM: ec86702456a70648c23972c332c8648d93335b88
PODFILE CHECKSUM: cd47cae4d60258a4caba7793e84c701fee9422dd
COCOAPODS: 1.5.3

ios/Pods/DoubleConversion/LICENSE (generated, 26 lines)
View File

@@ -0,0 +1,26 @@
Copyright 2006-2011, the V8 project authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

ios/Pods/DoubleConversion/README (generated, 54 lines)
View File

@@ -0,0 +1,54 @@
http://code.google.com/p/double-conversion
This project (double-conversion) provides binary-decimal and decimal-binary
routines for IEEE doubles.
The library consists of efficient conversion routines that have been extracted
from the V8 JavaScript engine. The code has been refactored and improved so that
it can be used more easily in other projects.
There is extensive documentation in src/double-conversion.h. Other examples can
be found in test/cctest/test-conversions.cc.
Building
========
This library can be built with scons [0] or cmake [1].
The checked-in Makefile simply forwards to scons, and provides a
shortcut to run all tests:
make
make test
Scons
-----
The easiest way to install this library is to use `scons`. It builds
the static and shared library, and is set up to install those at the
correct locations:
scons install
Use the `DESTDIR` option to change the target directory:
scons DESTDIR=alternative_directory install
Cmake
-----
To use cmake run `cmake .` in the root directory. This overwrites the
existing Makefile.
Use `-DBUILD_SHARED_LIBS=ON` to enable the compilation of shared libraries.
Note that this disables static libraries. There is currently no way to
build both libraries at the same time with cmake.
Use `-DBUILD_TESTING=ON` to build the test executable.
cmake . -DBUILD_TESTING=ON
make
test/cctest/cctest --list | tr -d '<' | xargs test/cctest/cctest
[0]: http://www.scons.org
[1]: http://www.cmake.org

View File

@@ -0,0 +1,641 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <math.h>
#include "bignum-dtoa.h"
#include "bignum.h"
#include "ieee.h"
namespace double_conversion {
static int NormalizedExponent(uint64_t significand, int exponent) {
ASSERT(significand != 0);
while ((significand & Double::kHiddenBit) == 0) {
significand = significand << 1;
exponent = exponent - 1;
}
return exponent;
}
// Forward declarations:
// Returns an estimation of k such that 10^(k-1) <= v < 10^k.
static int EstimatePower(int exponent);
// Computes v / 10^estimated_power exactly, as a ratio of two bignums, numerator
// and denominator.
static void InitialScaledStartValues(uint64_t significand,
int exponent,
bool lower_boundary_is_closer,
int estimated_power,
bool need_boundary_deltas,
Bignum* numerator,
Bignum* denominator,
Bignum* delta_minus,
Bignum* delta_plus);
// Multiplies numerator/denominator so that its value lies in the range 1-10.
// Returns decimal_point s.t.
// v = numerator'/denominator' * 10^(decimal_point-1)
// where numerator' and denominator' are the values of numerator and
// denominator after the call to this function.
static void FixupMultiply10(int estimated_power, bool is_even,
int* decimal_point,
Bignum* numerator, Bignum* denominator,
Bignum* delta_minus, Bignum* delta_plus);
// Generates digits from the left to the right and stops when the generated
// digits yield the shortest decimal representation of v.
static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator,
Bignum* delta_minus, Bignum* delta_plus,
bool is_even,
Vector<char> buffer, int* length);
// Generates 'requested_digits' after the decimal point.
static void BignumToFixed(int requested_digits, int* decimal_point,
Bignum* numerator, Bignum* denominator,
Vector<char>(buffer), int* length);
// Generates 'count' digits of numerator/denominator.
// Once 'count' digits have been produced rounds the result depending on the
// remainder (remainders of exactly .5 round upwards). Might update the
// decimal_point when rounding up (for example for 0.9999).
static void GenerateCountedDigits(int count, int* decimal_point,
Bignum* numerator, Bignum* denominator,
Vector<char>(buffer), int* length);
void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits,
Vector<char> buffer, int* length, int* decimal_point) {
ASSERT(v > 0);
ASSERT(!Double(v).IsSpecial());
uint64_t significand;
int exponent;
bool lower_boundary_is_closer;
if (mode == BIGNUM_DTOA_SHORTEST_SINGLE) {
float f = static_cast<float>(v);
ASSERT(f == v);
significand = Single(f).Significand();
exponent = Single(f).Exponent();
lower_boundary_is_closer = Single(f).LowerBoundaryIsCloser();
} else {
significand = Double(v).Significand();
exponent = Double(v).Exponent();
lower_boundary_is_closer = Double(v).LowerBoundaryIsCloser();
}
bool need_boundary_deltas =
(mode == BIGNUM_DTOA_SHORTEST || mode == BIGNUM_DTOA_SHORTEST_SINGLE);
bool is_even = (significand & 1) == 0;
int normalized_exponent = NormalizedExponent(significand, exponent);
// estimated_power might be too low by 1.
int estimated_power = EstimatePower(normalized_exponent);
// Shortcut for Fixed.
// The requested digits correspond to the digits after the point. If the
// number is much too small, then there is no need in trying to get any
// digits.
if (mode == BIGNUM_DTOA_FIXED && -estimated_power - 1 > requested_digits) {
buffer[0] = '\0';
*length = 0;
// Set decimal-point to -requested_digits. This is what Gay does.
// Note that it should not have any effect anyways since the string is
// empty.
*decimal_point = -requested_digits;
return;
}
Bignum numerator;
Bignum denominator;
Bignum delta_minus;
Bignum delta_plus;
// Make sure the bignum can grow large enough. The smallest double equals
// 4e-324. In this case the denominator needs fewer than 324*4 binary digits.
// The maximum double is 1.7976931348623157e308 which needs fewer than
// 308*4 binary digits.
ASSERT(Bignum::kMaxSignificantBits >= 324*4);
InitialScaledStartValues(significand, exponent, lower_boundary_is_closer,
estimated_power, need_boundary_deltas,
&numerator, &denominator,
&delta_minus, &delta_plus);
// We now have v = (numerator / denominator) * 10^estimated_power.
FixupMultiply10(estimated_power, is_even, decimal_point,
&numerator, &denominator,
&delta_minus, &delta_plus);
// We now have v = (numerator / denominator) * 10^(decimal_point-1), and
// 1 <= (numerator + delta_plus) / denominator < 10
switch (mode) {
case BIGNUM_DTOA_SHORTEST:
case BIGNUM_DTOA_SHORTEST_SINGLE:
GenerateShortestDigits(&numerator, &denominator,
&delta_minus, &delta_plus,
is_even, buffer, length);
break;
case BIGNUM_DTOA_FIXED:
BignumToFixed(requested_digits, decimal_point,
&numerator, &denominator,
buffer, length);
break;
case BIGNUM_DTOA_PRECISION:
GenerateCountedDigits(requested_digits, decimal_point,
&numerator, &denominator,
buffer, length);
break;
default:
UNREACHABLE();
}
buffer[*length] = '\0';
}
// The procedure starts generating digits from the left to the right and stops
// when the generated digits yield the shortest decimal representation of v. A
// decimal representation of v is a number lying closer to v than to any other
// double, so it converts to v when read.
//
// This is true if d, the decimal representation, is between m- and m+, the
// upper and lower boundaries. d must be strictly between them if !is_even.
// m- := (numerator - delta_minus) / denominator
// m+ := (numerator + delta_plus) / denominator
//
// Precondition: 0 <= (numerator+delta_plus) / denominator < 10.
// If 1 <= (numerator+delta_plus) / denominator < 10 then no leading 0 digit
// will be produced. This should be the standard precondition.
static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator,
Bignum* delta_minus, Bignum* delta_plus,
bool is_even,
Vector<char> buffer, int* length) {
// Small optimization: if delta_minus and delta_plus are the same just reuse
// one of the two bignums.
if (Bignum::Equal(*delta_minus, *delta_plus)) {
delta_plus = delta_minus;
}
*length = 0;
for (;;) {
uint16_t digit;
digit = numerator->DivideModuloIntBignum(*denominator);
ASSERT(digit <= 9); // digit is a uint16_t and therefore always positive.
// digit = numerator / denominator (integer division).
// numerator = numerator % denominator.
buffer[(*length)++] = static_cast<char>(digit + '0');
// Can we stop already?
// If the remainder of the division is less than the distance to the lower
// boundary we can stop. In this case we simply round down (discarding the
// remainder).
// Similarly we test if we can round up (using the upper boundary).
bool in_delta_room_minus;
bool in_delta_room_plus;
if (is_even) {
in_delta_room_minus = Bignum::LessEqual(*numerator, *delta_minus);
} else {
in_delta_room_minus = Bignum::Less(*numerator, *delta_minus);
}
if (is_even) {
in_delta_room_plus =
Bignum::PlusCompare(*numerator, *delta_plus, *denominator) >= 0;
} else {
in_delta_room_plus =
Bignum::PlusCompare(*numerator, *delta_plus, *denominator) > 0;
}
if (!in_delta_room_minus && !in_delta_room_plus) {
// Prepare for next iteration.
numerator->Times10();
delta_minus->Times10();
// We optimized delta_plus to be equal to delta_minus (if they share the
// same value). So don't multiply delta_plus if they point to the same
// object.
if (delta_minus != delta_plus) {
delta_plus->Times10();
}
} else if (in_delta_room_minus && in_delta_room_plus) {
// Let's see if 2*numerator < denominator.
// If yes, then the next digit would be < 5 and we can round down.
int compare = Bignum::PlusCompare(*numerator, *numerator, *denominator);
if (compare < 0) {
// Remaining digits are less than .5. -> Round down (== do nothing).
} else if (compare > 0) {
// Remaining digits are more than .5 of denominator. -> Round up.
// Note that the last digit could not be a '9' as otherwise the whole
// loop would have stopped earlier.
// We still have an assert here in case the preconditions were not
// satisfied.
ASSERT(buffer[(*length) - 1] != '9');
buffer[(*length) - 1]++;
} else {
// Halfway case.
// TODO(floitsch): need a way to solve half-way cases.
// For now let's round towards even (since this is what Gay seems to
// do).
if ((buffer[(*length) - 1] - '0') % 2 == 0) {
// Round down => Do nothing.
} else {
ASSERT(buffer[(*length) - 1] != '9');
buffer[(*length) - 1]++;
}
}
return;
} else if (in_delta_room_minus) {
// Round down (== do nothing).
return;
} else { // in_delta_room_plus
// Round up.
// Note again that the last digit could not be '9' since this would have
// stopped the loop earlier.
// We still have an ASSERT here, in case the preconditions were not
// satisfied.
ASSERT(buffer[(*length) -1] != '9');
buffer[(*length) - 1]++;
return;
}
}
}
// Let v = numerator / denominator < 10.
// Then we generate 'count' digits of d = x.xxxxx... (without the decimal point)
// from left to right. Once 'count' digits have been produced we decide whether
// to round up or down. Remainders of exactly .5 round upwards. Numbers such
// as 9.999999 propagate a carry all the way, and change the
// exponent (decimal_point), when rounding upwards.
static void GenerateCountedDigits(int count, int* decimal_point,
Bignum* numerator, Bignum* denominator,
Vector<char> buffer, int* length) {
ASSERT(count >= 0);
for (int i = 0; i < count - 1; ++i) {
uint16_t digit;
digit = numerator->DivideModuloIntBignum(*denominator);
ASSERT(digit <= 9); // digit is a uint16_t and therefore always positive.
// digit = numerator / denominator (integer division).
// numerator = numerator % denominator.
buffer[i] = static_cast<char>(digit + '0');
// Prepare for next iteration.
numerator->Times10();
}
// Generate the last digit.
uint16_t digit;
digit = numerator->DivideModuloIntBignum(*denominator);
if (Bignum::PlusCompare(*numerator, *numerator, *denominator) >= 0) {
digit++;
}
ASSERT(digit <= 10);
buffer[count - 1] = static_cast<char>(digit + '0');
// Correct bad digits (in case we had a sequence of '9's). Propagate the
// carry until we hit a non-'9' or until we reach the first digit.
for (int i = count - 1; i > 0; --i) {
if (buffer[i] != '0' + 10) break;
buffer[i] = '0';
buffer[i - 1]++;
}
if (buffer[0] == '0' + 10) {
// Propagate a carry past the top place.
buffer[0] = '1';
(*decimal_point)++;
}
*length = count;
}
// Generates 'requested_digits' after the decimal point. It might omit
// trailing '0's. If the input number is too small then no digits at all are
// generated (ex.: 2 fixed digits for 0.00001).
//
// Input verifies: 1 <= (numerator + delta) / denominator < 10.
static void BignumToFixed(int requested_digits, int* decimal_point,
Bignum* numerator, Bignum* denominator,
Vector<char>(buffer), int* length) {
// Note that we have to look at more than just the requested_digits, since
// a number could be rounded up. Example: v=0.5 with requested_digits=0.
// Even though the power of v equals 0 we can't just stop here.
if (-(*decimal_point) > requested_digits) {
// The number is definitively too small.
// Ex: 0.001 with requested_digits == 1.
// Set decimal-point to -requested_digits. This is what Gay does.
// Note that it should not have any effect anyways since the string is
// empty.
*decimal_point = -requested_digits;
*length = 0;
return;
} else if (-(*decimal_point) == requested_digits) {
// We only need to verify if the number rounds down or up.
// Ex: 0.04 and 0.06 with requested_digits == 1.
ASSERT(*decimal_point == -requested_digits);
// Initially the fraction lies in range (1, 10]. Multiply the denominator
// by 10 so that we can compare more easily.
denominator->Times10();
if (Bignum::PlusCompare(*numerator, *numerator, *denominator) >= 0) {
// If the fraction is >= 0.5 then we have to include the rounded
// digit.
buffer[0] = '1';
*length = 1;
(*decimal_point)++;
} else {
// Note that we caught most of similar cases earlier.
*length = 0;
}
return;
} else {
// The requested digits correspond to the digits after the point.
// The variable 'needed_digits' includes the digits before the point.
int needed_digits = (*decimal_point) + requested_digits;
GenerateCountedDigits(needed_digits, decimal_point,
numerator, denominator,
buffer, length);
}
}
// Returns an estimation of k such that 10^(k-1) <= v < 10^k where
// v = f * 2^exponent and 2^52 <= f < 2^53.
// v is hence a normalized double with the given exponent. The output is an
// approximation for the exponent of the decimal approximation .digits * 10^k.
//
// The result might undershoot by 1 in which case 10^k <= v < 10^k+1.
// Note: this property holds for v's upper boundary m+ too.
// 10^k <= m+ < 10^k+1.
// (see explanation below).
//
// Examples:
// EstimatePower(0) => 16
// EstimatePower(-52) => 0
//
// Note: e >= 0 => EstimatedPower(e) > 0. No similar claim can be made for e<0.
static int EstimatePower(int exponent) {
// This function estimates log10 of v where v = f*2^e (with e == exponent).
// Note that 10^floor(log10(v)) <= v, but v <= 10^ceil(log10(v)).
// Note that f is bounded by its container size. Let p = 53 (the double's
// significand size). Then 2^(p-1) <= f < 2^p.
//
// Given that log10(v) == log2(v)/log2(10) and e+(len(f)-1) is quite close
// to log2(v) the function is simplified to (e+(len(f)-1)/log2(10)).
// The computed number undershoots by less than 0.631 (when we compute log3
// and not log10).
//
// Optimization: since we only need an approximated result this computation
// can be performed on 64 bit integers. On x86/x64 architecture the speedup is
// not really measurable, though.
//
// Since we want to avoid overshooting we decrement by 1e10 so that
// floating-point imprecisions don't affect us.
//
// Explanation for v's boundary m+: the computation takes advantage of
// the fact that 2^(p-1) <= f < 2^p. Boundaries still satisfy this requirement
// (even for denormals where the delta can be much more important).
const double k1Log10 = 0.30102999566398114; // 1/lg(10)
// For doubles len(f) == 53 (don't forget the hidden bit).
const int kSignificandSize = Double::kSignificandSize;
double estimate = ceil((exponent + kSignificandSize - 1) * k1Log10 - 1e-10);
return static_cast<int>(estimate);
}
// See comments for InitialScaledStartValues.
static void InitialScaledStartValuesPositiveExponent(
uint64_t significand, int exponent,
int estimated_power, bool need_boundary_deltas,
Bignum* numerator, Bignum* denominator,
Bignum* delta_minus, Bignum* delta_plus) {
// A positive exponent implies a positive power.
ASSERT(estimated_power >= 0);
// Since the estimated_power is positive we simply multiply the denominator
// by 10^estimated_power.
// numerator = v.
numerator->AssignUInt64(significand);
numerator->ShiftLeft(exponent);
// denominator = 10^estimated_power.
denominator->AssignPowerUInt16(10, estimated_power);
if (need_boundary_deltas) {
// Introduce a common denominator so that the deltas to the boundaries are
// integers.
denominator->ShiftLeft(1);
numerator->ShiftLeft(1);
// Let v = f * 2^e, then m+ - v = 1/2 * 2^e; With the common
// denominator (of 2) delta_plus equals 2^e.
delta_plus->AssignUInt16(1);
delta_plus->ShiftLeft(exponent);
// Same for delta_minus. The adjustments if f == 2^p-1 are done later.
delta_minus->AssignUInt16(1);
delta_minus->ShiftLeft(exponent);
}
}
// See comments for InitialScaledStartValues
static void InitialScaledStartValuesNegativeExponentPositivePower(
uint64_t significand, int exponent,
int estimated_power, bool need_boundary_deltas,
Bignum* numerator, Bignum* denominator,
Bignum* delta_minus, Bignum* delta_plus) {
// v = f * 2^e with e < 0, and with estimated_power >= 0.
// This means that e is close to 0 (have a look at how estimated_power is
// computed).
// numerator = significand
// since v = significand * 2^exponent this is equivalent to
// numerator = v * 2^-exponent
numerator->AssignUInt64(significand);
// denominator = 10^estimated_power * 2^-exponent (with exponent < 0)
denominator->AssignPowerUInt16(10, estimated_power);
denominator->ShiftLeft(-exponent);
if (need_boundary_deltas) {
// Introduce a common denominator so that the deltas to the boundaries are
// integers.
denominator->ShiftLeft(1);
numerator->ShiftLeft(1);
// Let v = f * 2^e, then m+ - v = 1/2 * 2^e; With the common
// denominator (of 2) delta_plus equals 2^e.
// Given that the denominator already includes v's exponent the distance
// to the boundaries is simply 1.
delta_plus->AssignUInt16(1);
// Same for delta_minus. The adjustments if f == 2^p-1 are done later.
delta_minus->AssignUInt16(1);
}
}
// See comments for InitialScaledStartValues
static void InitialScaledStartValuesNegativeExponentNegativePower(
uint64_t significand, int exponent,
int estimated_power, bool need_boundary_deltas,
Bignum* numerator, Bignum* denominator,
Bignum* delta_minus, Bignum* delta_plus) {
// Instead of multiplying the denominator with 10^estimated_power we
// multiply all values (numerator and deltas) by 10^-estimated_power.
// Use numerator as temporary container for power_ten.
Bignum* power_ten = numerator;
power_ten->AssignPowerUInt16(10, -estimated_power);
if (need_boundary_deltas) {
// Since power_ten == numerator we must make a copy of 10^estimated_power
// before we complete the computation of the numerator.
// delta_plus = delta_minus = 10^estimated_power
delta_plus->AssignBignum(*power_ten);
delta_minus->AssignBignum(*power_ten);
}
// numerator = significand * 2 * 10^-estimated_power
// since v = significand * 2^exponent this is equivalent to
// numerator = v * 10^-estimated_power * 2 * 2^-exponent.
// Remember: numerator has been abused as power_ten. So no need to assign it
// to itself.
ASSERT(numerator == power_ten);
numerator->MultiplyByUInt64(significand);
// denominator = 2 * 2^-exponent with exponent < 0.
denominator->AssignUInt16(1);
denominator->ShiftLeft(-exponent);
if (need_boundary_deltas) {
// Introduce a common denominator so that the deltas to the boundaries are
// integers.
numerator->ShiftLeft(1);
denominator->ShiftLeft(1);
// With this shift the boundaries have their correct value, since
// delta_plus = 10^-estimated_power, and
// delta_minus = 10^-estimated_power.
// These assignments have been done earlier.
// The adjustments if f == 2^p-1 (lower boundary is closer) are done later.
}
}
// Let v = significand * 2^exponent.
// Computes v / 10^estimated_power exactly, as a ratio of two bignums, numerator
// and denominator. The functions GenerateShortestDigits and
// GenerateCountedDigits will then convert this ratio to its decimal
// representation d, with the required accuracy.
// Then d * 10^estimated_power is the representation of v.
// (Note: the fraction and the estimated_power might get adjusted before
// generating the decimal representation.)
//
// The initial start values consist of:
// - a scaled numerator: s.t. numerator/denominator == v / 10^estimated_power.
// - a scaled (common) denominator.
// optionally (used by GenerateShortestDigits to decide if it has the shortest
// decimal converting back to v):
// - v - m-: the distance to the lower boundary.
// - m+ - v: the distance to the upper boundary.
//
// v, m+, m-, and therefore v - m- and m+ - v all share the same denominator.
//
// Let ep == estimated_power, then the returned values will satisfy:
// v / 10^ep = numerator / denominator.
// v's boundarys m- and m+:
// m- / 10^ep == v / 10^ep - delta_minus / denominator
// m+ / 10^ep == v / 10^ep + delta_plus / denominator
// Or in other words:
// m- == v - delta_minus * 10^ep / denominator;
// m+ == v + delta_plus * 10^ep / denominator;
//
// Since 10^(k-1) <= v < 10^k (with k == estimated_power)
// or 10^k <= v < 10^(k+1)
// we then have 0.1 <= numerator/denominator < 1
// or 1 <= numerator/denominator < 10
//
// It is then easy to kickstart the digit-generation routine.
//
// The boundary-deltas are only filled if the mode equals BIGNUM_DTOA_SHORTEST
// or BIGNUM_DTOA_SHORTEST_SINGLE.
static void InitialScaledStartValues(uint64_t significand,
int exponent,
bool lower_boundary_is_closer,
int estimated_power,
bool need_boundary_deltas,
Bignum* numerator,
Bignum* denominator,
Bignum* delta_minus,
Bignum* delta_plus) {
if (exponent >= 0) {
InitialScaledStartValuesPositiveExponent(
significand, exponent, estimated_power, need_boundary_deltas,
numerator, denominator, delta_minus, delta_plus);
} else if (estimated_power >= 0) {
InitialScaledStartValuesNegativeExponentPositivePower(
significand, exponent, estimated_power, need_boundary_deltas,
numerator, denominator, delta_minus, delta_plus);
} else {
InitialScaledStartValuesNegativeExponentNegativePower(
significand, exponent, estimated_power, need_boundary_deltas,
numerator, denominator, delta_minus, delta_plus);
}
if (need_boundary_deltas && lower_boundary_is_closer) {
// The lower boundary is closer at half the distance of "normal" numbers.
// Increase the common denominator and adapt all but the delta_minus.
denominator->ShiftLeft(1); // *2
numerator->ShiftLeft(1); // *2
delta_plus->ShiftLeft(1); // *2
}
}
// This routine multiplies numerator/denominator so that its value lies in the
// range 1-10. That is, after a call to this function we have:
//   1 <= (numerator + delta_plus) / denominator < 10.
// Let numerator be the input before modification and numerator' the argument
// after modification, then the output-parameter decimal_point is such that
// numerator / denominator * 10^estimated_power ==
// numerator' / denominator' * 10^(decimal_point - 1)
// In some cases estimated_power was too low, and this is already the case. We
// then simply adjust the power so that 10^(k-1) <= v < 10^k (with k ==
// estimated_power) but do not touch the numerator or denominator.
// Otherwise the routine multiplies the numerator and the deltas by 10.
static void FixupMultiply10(int estimated_power, bool is_even,
int* decimal_point,
Bignum* numerator, Bignum* denominator,
Bignum* delta_minus, Bignum* delta_plus) {
bool in_range;
if (is_even) {
// For IEEE doubles half-way cases (in decimal system numbers ending with 5)
// are rounded to the closest floating-point number with even significand.
in_range = Bignum::PlusCompare(*numerator, *delta_plus, *denominator) >= 0;
} else {
in_range = Bignum::PlusCompare(*numerator, *delta_plus, *denominator) > 0;
}
if (in_range) {
// Since numerator + delta_plus >= denominator we already have
// 1 <= numerator/denominator < 10. Simply update the estimated_power.
*decimal_point = estimated_power + 1;
} else {
*decimal_point = estimated_power;
numerator->Times10();
if (Bignum::Equal(*delta_minus, *delta_plus)) {
delta_minus->Times10();
delta_plus->AssignBignum(*delta_minus);
} else {
delta_minus->Times10();
delta_plus->Times10();
}
}
}
} // namespace double_conversion
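A quick standalone check of the EstimatePower formula above (a hypothetical sketch, not part of the pod; it mirrors the constants in the function and the two examples given in the comment, EstimatePower(0) => 16 and EstimatePower(-52) => 0):
#include <cmath>
#include <cstdio>
// Mirrors the estimate: k = ceil((e + p - 1) * log10(2) - 1e-10), with p = 53 for doubles.
static int EstimatePowerSketch(int exponent) {
  const double k1Log10 = 0.30102999566398114;  // 1/lg(10)
  const int kSignificandSize = 53;  // includes the hidden bit
  return static_cast<int>(std::ceil((exponent + kSignificandSize - 1) * k1Log10 - 1e-10));
}
int main() {
  // Prints "16 0", matching the examples in the comment above.
  std::printf("%d %d\n", EstimatePowerSketch(0), EstimatePowerSketch(-52));
  return 0;
}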

View File

@@ -0,0 +1,84 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef DOUBLE_CONVERSION_BIGNUM_DTOA_H_
#define DOUBLE_CONVERSION_BIGNUM_DTOA_H_
#include "utils.h"
namespace double_conversion {
enum BignumDtoaMode {
// Return the shortest correct representation.
// For example the output of 0.299999999999999988897 is (the less accurate but
// correct) 0.3.
BIGNUM_DTOA_SHORTEST,
// Same as BIGNUM_DTOA_SHORTEST but for single-precision floats.
BIGNUM_DTOA_SHORTEST_SINGLE,
// Return a fixed number of digits after the decimal point.
// For instance fixed(0.1, 4) becomes 0.1000
// If the input number is big, the output will be big.
BIGNUM_DTOA_FIXED,
// Return a fixed number of digits, no matter what the exponent is.
BIGNUM_DTOA_PRECISION
};
// Converts the given double 'v' to ascii.
// The result should be interpreted as buffer * 10^(point-length).
// The buffer will be null-terminated.
//
// The input v must be > 0 and different from NaN, and Infinity.
//
// The output depends on the given mode:
// - SHORTEST: produce the least amount of digits for which the internal
// identity requirement is still satisfied. If the digits are printed
// (together with the correct exponent) then reading this number will give
// 'v' again. The buffer will choose the representation that is closest to
// 'v'. If there are two at the same distance, then the number is rounded up.
// In this mode the 'requested_digits' parameter is ignored.
// - FIXED: produces digits necessary to print a given number with
// 'requested_digits' digits after the decimal point. The produced digits
// might be too short in which case the caller has to fill the gaps with '0's.
// Example: toFixed(0.001, 5) is allowed to return buffer="1", point=-2.
// Halfway cases are rounded up. The call toFixed(0.15, 2) thus returns
// buffer="2", point=0.
// Note: the length of the returned buffer has no meaning wrt the significance
// of its digits. That is, just because it contains '0's does not mean that
// any other digit would not satisfy the internal identity requirement.
// - PRECISION: produces 'requested_digits' where the first digit is not '0'.
// Even though the length of produced digits usually equals
// 'requested_digits', the function is allowed to return fewer digits, in
// which case the caller has to fill the missing digits with '0's.
// Halfway cases are again rounded up.
// 'BignumDtoa' expects the given buffer to be big enough to hold all digits
// and a terminating null-character.
void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits,
Vector<char> buffer, int* length, int* point);
} // namespace double_conversion
#endif // DOUBLE_CONVERSION_BIGNUM_DTOA_H_
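A rough usage sketch of the contract documented above (assuming the pod's sources are on the include path; the result reads as buffer * 10^(point - length) and the buffer is null-terminated):
#include <cstdio>
#include "bignum-dtoa.h"
int main() {
  using namespace double_conversion;
  char buf[32];
  int length = 0;
  int point = 0;
  // Shortest correct representation of 0.3 (BIGNUM_DTOA_SHORTEST ignores requested_digits).
  BignumDtoa(0.3, BIGNUM_DTOA_SHORTEST, 0,
             Vector<char>(buf, static_cast<int>(sizeof(buf))), &length, &point);
  // Here digits == "3", length == 1, point == 0, i.e. 3 * 10^(0 - 1) == 0.3.
  std::printf("digits=%s length=%d point=%d\n", buf, length, point);
  return 0;
}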

View File

@@ -0,0 +1,766 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "bignum.h"
#include "utils.h"
namespace double_conversion {
Bignum::Bignum()
: bigits_(bigits_buffer_, kBigitCapacity), used_digits_(0), exponent_(0) {
for (int i = 0; i < kBigitCapacity; ++i) {
bigits_[i] = 0;
}
}
template<typename S>
static int BitSize(S value) {
(void) value; // Mark variable as used.
return 8 * sizeof(value);
}
// Guaranteed to lie in one Bigit.
void Bignum::AssignUInt16(uint16_t value) {
ASSERT(kBigitSize >= BitSize(value));
Zero();
if (value == 0) return;
EnsureCapacity(1);
bigits_[0] = value;
used_digits_ = 1;
}
void Bignum::AssignUInt64(uint64_t value) {
const int kUInt64Size = 64;
Zero();
if (value == 0) return;
int needed_bigits = kUInt64Size / kBigitSize + 1;
EnsureCapacity(needed_bigits);
for (int i = 0; i < needed_bigits; ++i) {
bigits_[i] = value & kBigitMask;
value = value >> kBigitSize;
}
used_digits_ = needed_bigits;
Clamp();
}
void Bignum::AssignBignum(const Bignum& other) {
exponent_ = other.exponent_;
for (int i = 0; i < other.used_digits_; ++i) {
bigits_[i] = other.bigits_[i];
}
// Clear the excess digits (if there were any).
for (int i = other.used_digits_; i < used_digits_; ++i) {
bigits_[i] = 0;
}
used_digits_ = other.used_digits_;
}
static uint64_t ReadUInt64(Vector<const char> buffer,
int from,
int digits_to_read) {
uint64_t result = 0;
for (int i = from; i < from + digits_to_read; ++i) {
int digit = buffer[i] - '0';
ASSERT(0 <= digit && digit <= 9);
result = result * 10 + digit;
}
return result;
}
void Bignum::AssignDecimalString(Vector<const char> value) {
// 2^64 = 18446744073709551616 > 10^19
const int kMaxUint64DecimalDigits = 19;
Zero();
int length = value.length();
int pos = 0;
// Let's just say that each digit needs 4 bits.
while (length >= kMaxUint64DecimalDigits) {
uint64_t digits = ReadUInt64(value, pos, kMaxUint64DecimalDigits);
pos += kMaxUint64DecimalDigits;
length -= kMaxUint64DecimalDigits;
MultiplyByPowerOfTen(kMaxUint64DecimalDigits);
AddUInt64(digits);
}
uint64_t digits = ReadUInt64(value, pos, length);
MultiplyByPowerOfTen(length);
AddUInt64(digits);
Clamp();
}
static int HexCharValue(char c) {
if ('0' <= c && c <= '9') return c - '0';
if ('a' <= c && c <= 'f') return 10 + c - 'a';
ASSERT('A' <= c && c <= 'F');
return 10 + c - 'A';
}
void Bignum::AssignHexString(Vector<const char> value) {
Zero();
int length = value.length();
int needed_bigits = length * 4 / kBigitSize + 1;
EnsureCapacity(needed_bigits);
int string_index = length - 1;
for (int i = 0; i < needed_bigits - 1; ++i) {
// These bigits are guaranteed to be "full".
Chunk current_bigit = 0;
for (int j = 0; j < kBigitSize / 4; j++) {
current_bigit += HexCharValue(value[string_index--]) << (j * 4);
}
bigits_[i] = current_bigit;
}
used_digits_ = needed_bigits - 1;
Chunk most_significant_bigit = 0; // Could be = 0;
for (int j = 0; j <= string_index; ++j) {
most_significant_bigit <<= 4;
most_significant_bigit += HexCharValue(value[j]);
}
if (most_significant_bigit != 0) {
bigits_[used_digits_] = most_significant_bigit;
used_digits_++;
}
Clamp();
}
void Bignum::AddUInt64(uint64_t operand) {
if (operand == 0) return;
Bignum other;
other.AssignUInt64(operand);
AddBignum(other);
}
void Bignum::AddBignum(const Bignum& other) {
ASSERT(IsClamped());
ASSERT(other.IsClamped());
// If this has a greater exponent than other append zero-bigits to this.
// After this call exponent_ <= other.exponent_.
Align(other);
// There are two possibilities:
// aaaaaaaaaaa 0000 (where the 0s represent a's exponent)
// bbbbb 00000000
// ----------------
// ccccccccccc 0000
// or
// aaaaaaaaaa 0000
// bbbbbbbbb 0000000
// -----------------
// cccccccccccc 0000
// In both cases we might need a carry bigit.
EnsureCapacity(1 + Max(BigitLength(), other.BigitLength()) - exponent_);
Chunk carry = 0;
int bigit_pos = other.exponent_ - exponent_;
ASSERT(bigit_pos >= 0);
for (int i = 0; i < other.used_digits_; ++i) {
Chunk sum = bigits_[bigit_pos] + other.bigits_[i] + carry;
bigits_[bigit_pos] = sum & kBigitMask;
carry = sum >> kBigitSize;
bigit_pos++;
}
while (carry != 0) {
Chunk sum = bigits_[bigit_pos] + carry;
bigits_[bigit_pos] = sum & kBigitMask;
carry = sum >> kBigitSize;
bigit_pos++;
}
used_digits_ = Max(bigit_pos, used_digits_);
ASSERT(IsClamped());
}
void Bignum::SubtractBignum(const Bignum& other) {
ASSERT(IsClamped());
ASSERT(other.IsClamped());
// We require this to be bigger than other.
ASSERT(LessEqual(other, *this));
Align(other);
int offset = other.exponent_ - exponent_;
Chunk borrow = 0;
int i;
for (i = 0; i < other.used_digits_; ++i) {
ASSERT((borrow == 0) || (borrow == 1));
Chunk difference = bigits_[i + offset] - other.bigits_[i] - borrow;
bigits_[i + offset] = difference & kBigitMask;
borrow = difference >> (kChunkSize - 1);
}
while (borrow != 0) {
Chunk difference = bigits_[i + offset] - borrow;
bigits_[i + offset] = difference & kBigitMask;
borrow = difference >> (kChunkSize - 1);
++i;
}
Clamp();
}
void Bignum::ShiftLeft(int shift_amount) {
if (used_digits_ == 0) return;
exponent_ += shift_amount / kBigitSize;
int local_shift = shift_amount % kBigitSize;
EnsureCapacity(used_digits_ + 1);
BigitsShiftLeft(local_shift);
}
void Bignum::MultiplyByUInt32(uint32_t factor) {
if (factor == 1) return;
if (factor == 0) {
Zero();
return;
}
if (used_digits_ == 0) return;
// The product of a bigit with the factor is of size kBigitSize + 32.
// Assert that this number + 1 (for the carry) fits into double chunk.
ASSERT(kDoubleChunkSize >= kBigitSize + 32 + 1);
DoubleChunk carry = 0;
for (int i = 0; i < used_digits_; ++i) {
DoubleChunk product = static_cast<DoubleChunk>(factor) * bigits_[i] + carry;
bigits_[i] = static_cast<Chunk>(product & kBigitMask);
carry = (product >> kBigitSize);
}
while (carry != 0) {
EnsureCapacity(used_digits_ + 1);
bigits_[used_digits_] = carry & kBigitMask;
used_digits_++;
carry >>= kBigitSize;
}
}
void Bignum::MultiplyByUInt64(uint64_t factor) {
if (factor == 1) return;
if (factor == 0) {
Zero();
return;
}
ASSERT(kBigitSize < 32);
uint64_t carry = 0;
uint64_t low = factor & 0xFFFFFFFF;
uint64_t high = factor >> 32;
for (int i = 0; i < used_digits_; ++i) {
uint64_t product_low = low * bigits_[i];
uint64_t product_high = high * bigits_[i];
uint64_t tmp = (carry & kBigitMask) + product_low;
bigits_[i] = tmp & kBigitMask;
carry = (carry >> kBigitSize) + (tmp >> kBigitSize) +
(product_high << (32 - kBigitSize));
}
while (carry != 0) {
EnsureCapacity(used_digits_ + 1);
bigits_[used_digits_] = carry & kBigitMask;
used_digits_++;
carry >>= kBigitSize;
}
}
void Bignum::MultiplyByPowerOfTen(int exponent) {
const uint64_t kFive27 = UINT64_2PART_C(0x6765c793, fa10079d);
const uint16_t kFive1 = 5;
const uint16_t kFive2 = kFive1 * 5;
const uint16_t kFive3 = kFive2 * 5;
const uint16_t kFive4 = kFive3 * 5;
const uint16_t kFive5 = kFive4 * 5;
const uint16_t kFive6 = kFive5 * 5;
const uint32_t kFive7 = kFive6 * 5;
const uint32_t kFive8 = kFive7 * 5;
const uint32_t kFive9 = kFive8 * 5;
const uint32_t kFive10 = kFive9 * 5;
const uint32_t kFive11 = kFive10 * 5;
const uint32_t kFive12 = kFive11 * 5;
const uint32_t kFive13 = kFive12 * 5;
const uint32_t kFive1_to_12[] =
{ kFive1, kFive2, kFive3, kFive4, kFive5, kFive6,
kFive7, kFive8, kFive9, kFive10, kFive11, kFive12 };
ASSERT(exponent >= 0);
if (exponent == 0) return;
if (used_digits_ == 0) return;
// We shift by exponent at the end just before returning.
int remaining_exponent = exponent;
while (remaining_exponent >= 27) {
MultiplyByUInt64(kFive27);
remaining_exponent -= 27;
}
while (remaining_exponent >= 13) {
MultiplyByUInt32(kFive13);
remaining_exponent -= 13;
}
if (remaining_exponent > 0) {
MultiplyByUInt32(kFive1_to_12[remaining_exponent - 1]);
}
ShiftLeft(exponent);
}
void Bignum::Square() {
ASSERT(IsClamped());
int product_length = 2 * used_digits_;
EnsureCapacity(product_length);
// Comba multiplication: compute each column separately.
// Example: r = a2a1a0 * b2b1b0.
// r = 1 * a0b0 +
// 10 * (a1b0 + a0b1) +
// 100 * (a2b0 + a1b1 + a0b2) +
// 1000 * (a2b1 + a1b2) +
// 10000 * a2b2
//
// In the worst case we have to accumulate nb-digits products of digit*digit.
//
// Assert that the additional number of bits in a DoubleChunk are enough to
// sum up used_digits of Bigit*Bigit.
if ((1 << (2 * (kChunkSize - kBigitSize))) <= used_digits_) {
UNIMPLEMENTED();
}
DoubleChunk accumulator = 0;
// First shift the digits so we don't overwrite them.
int copy_offset = used_digits_;
for (int i = 0; i < used_digits_; ++i) {
bigits_[copy_offset + i] = bigits_[i];
}
// We have two loops to avoid some 'if's in the loop.
for (int i = 0; i < used_digits_; ++i) {
// Process temporary digit i with power i.
// The sum of the two indices must be equal to i.
int bigit_index1 = i;
int bigit_index2 = 0;
// Sum all of the sub-products.
while (bigit_index1 >= 0) {
Chunk chunk1 = bigits_[copy_offset + bigit_index1];
Chunk chunk2 = bigits_[copy_offset + bigit_index2];
accumulator += static_cast<DoubleChunk>(chunk1) * chunk2;
bigit_index1--;
bigit_index2++;
}
bigits_[i] = static_cast<Chunk>(accumulator) & kBigitMask;
accumulator >>= kBigitSize;
}
for (int i = used_digits_; i < product_length; ++i) {
int bigit_index1 = used_digits_ - 1;
int bigit_index2 = i - bigit_index1;
// Invariant: sum of both indices is again equal to i.
// Inner loop runs 0 times on last iteration, emptying accumulator.
while (bigit_index2 < used_digits_) {
Chunk chunk1 = bigits_[copy_offset + bigit_index1];
Chunk chunk2 = bigits_[copy_offset + bigit_index2];
accumulator += static_cast<DoubleChunk>(chunk1) * chunk2;
bigit_index1--;
bigit_index2++;
}
// The overwritten bigits_[i] will never be read in further loop iterations,
// because bigit_index1 and bigit_index2 are always greater
// than i - used_digits_.
bigits_[i] = static_cast<Chunk>(accumulator) & kBigitMask;
accumulator >>= kBigitSize;
}
// Since the result was guaranteed to lie inside the number the
// accumulator must be 0 now.
ASSERT(accumulator == 0);
// Don't forget to update the used_digits and the exponent.
used_digits_ = product_length;
exponent_ *= 2;
Clamp();
}
void Bignum::AssignPowerUInt16(uint16_t base, int power_exponent) {
ASSERT(base != 0);
ASSERT(power_exponent >= 0);
if (power_exponent == 0) {
AssignUInt16(1);
return;
}
Zero();
int shifts = 0;
// We expect base to be in range 2-32, and most often to be 10.
// It does not make much sense to implement different algorithms for counting
// the bits.
while ((base & 1) == 0) {
base >>= 1;
shifts++;
}
int bit_size = 0;
int tmp_base = base;
while (tmp_base != 0) {
tmp_base >>= 1;
bit_size++;
}
int final_size = bit_size * power_exponent;
// 1 extra bigit for the shifting, and one for rounded final_size.
EnsureCapacity(final_size / kBigitSize + 2);
// Left to Right exponentiation.
int mask = 1;
while (power_exponent >= mask) mask <<= 1;
// The mask is now pointing to the bit above the most significant 1-bit of
// power_exponent.
// Get rid of first 1-bit;
mask >>= 2;
uint64_t this_value = base;
bool delayed_multipliciation = false;
const uint64_t max_32bits = 0xFFFFFFFF;
while (mask != 0 && this_value <= max_32bits) {
this_value = this_value * this_value;
// Verify that there is enough space in this_value to perform the
// multiplication. The first bit_size bits must be 0.
if ((power_exponent & mask) != 0) {
uint64_t base_bits_mask =
~((static_cast<uint64_t>(1) << (64 - bit_size)) - 1);
bool high_bits_zero = (this_value & base_bits_mask) == 0;
if (high_bits_zero) {
this_value *= base;
} else {
delayed_multipliciation = true;
}
}
mask >>= 1;
}
AssignUInt64(this_value);
if (delayed_multipliciation) {
MultiplyByUInt32(base);
}
// Now do the same thing as a bignum.
while (mask != 0) {
Square();
if ((power_exponent & mask) != 0) {
MultiplyByUInt32(base);
}
mask >>= 1;
}
// And finally add the saved shifts.
ShiftLeft(shifts * power_exponent);
}
// Precondition: this/other < 16bit.
uint16_t Bignum::DivideModuloIntBignum(const Bignum& other) {
ASSERT(IsClamped());
ASSERT(other.IsClamped());
ASSERT(other.used_digits_ > 0);
// Easy case: if we have fewer digits than the divisor, then the result is 0.
// Note: this handles the case where this == 0, too.
if (BigitLength() < other.BigitLength()) {
return 0;
}
Align(other);
uint16_t result = 0;
// Start by removing multiples of 'other' until both numbers have the same
// number of digits.
while (BigitLength() > other.BigitLength()) {
// This naive approach is extremely inefficient if `this` divided by other
// is big. This function is implemented for doubleToString where
// the result should be small (less than 10).
ASSERT(other.bigits_[other.used_digits_ - 1] >= ((1 << kBigitSize) / 16));
ASSERT(bigits_[used_digits_ - 1] < 0x10000);
// Remove the multiples of the first digit.
// Example this = 23 and other equals 9. -> Remove 2 multiples.
result += static_cast<uint16_t>(bigits_[used_digits_ - 1]);
SubtractTimes(other, bigits_[used_digits_ - 1]);
}
ASSERT(BigitLength() == other.BigitLength());
// Both bignums are at the same length now.
// Since other has more than 0 digits we know that the access to
// bigits_[used_digits_ - 1] is safe.
Chunk this_bigit = bigits_[used_digits_ - 1];
Chunk other_bigit = other.bigits_[other.used_digits_ - 1];
if (other.used_digits_ == 1) {
// Shortcut for easy (and common) case.
int quotient = this_bigit / other_bigit;
bigits_[used_digits_ - 1] = this_bigit - other_bigit * quotient;
ASSERT(quotient < 0x10000);
result += static_cast<uint16_t>(quotient);
Clamp();
return result;
}
int division_estimate = this_bigit / (other_bigit + 1);
ASSERT(division_estimate < 0x10000);
result += static_cast<uint16_t>(division_estimate);
SubtractTimes(other, division_estimate);
if (other_bigit * (division_estimate + 1) > this_bigit) {
// No need to even try to subtract. Even if other's remaining digits were 0
// another subtraction would be too much.
return result;
}
while (LessEqual(other, *this)) {
SubtractBignum(other);
result++;
}
return result;
}
template<typename S>
static int SizeInHexChars(S number) {
ASSERT(number > 0);
int result = 0;
while (number != 0) {
number >>= 4;
result++;
}
return result;
}
static char HexCharOfValue(int value) {
ASSERT(0 <= value && value <= 16);
if (value < 10) return static_cast<char>(value + '0');
return static_cast<char>(value - 10 + 'A');
}
bool Bignum::ToHexString(char* buffer, int buffer_size) const {
ASSERT(IsClamped());
// Each bigit must be printable as separate hex-character.
ASSERT(kBigitSize % 4 == 0);
const int kHexCharsPerBigit = kBigitSize / 4;
if (used_digits_ == 0) {
if (buffer_size < 2) return false;
buffer[0] = '0';
buffer[1] = '\0';
return true;
}
// We add 1 for the terminating '\0' character.
int needed_chars = (BigitLength() - 1) * kHexCharsPerBigit +
SizeInHexChars(bigits_[used_digits_ - 1]) + 1;
if (needed_chars > buffer_size) return false;
int string_index = needed_chars - 1;
buffer[string_index--] = '\0';
for (int i = 0; i < exponent_; ++i) {
for (int j = 0; j < kHexCharsPerBigit; ++j) {
buffer[string_index--] = '0';
}
}
for (int i = 0; i < used_digits_ - 1; ++i) {
Chunk current_bigit = bigits_[i];
for (int j = 0; j < kHexCharsPerBigit; ++j) {
buffer[string_index--] = HexCharOfValue(current_bigit & 0xF);
current_bigit >>= 4;
}
}
// And finally the last bigit.
Chunk most_significant_bigit = bigits_[used_digits_ - 1];
while (most_significant_bigit != 0) {
buffer[string_index--] = HexCharOfValue(most_significant_bigit & 0xF);
most_significant_bigit >>= 4;
}
return true;
}
Bignum::Chunk Bignum::BigitAt(int index) const {
if (index >= BigitLength()) return 0;
if (index < exponent_) return 0;
return bigits_[index - exponent_];
}
int Bignum::Compare(const Bignum& a, const Bignum& b) {
ASSERT(a.IsClamped());
ASSERT(b.IsClamped());
int bigit_length_a = a.BigitLength();
int bigit_length_b = b.BigitLength();
if (bigit_length_a < bigit_length_b) return -1;
if (bigit_length_a > bigit_length_b) return +1;
for (int i = bigit_length_a - 1; i >= Min(a.exponent_, b.exponent_); --i) {
Chunk bigit_a = a.BigitAt(i);
Chunk bigit_b = b.BigitAt(i);
if (bigit_a < bigit_b) return -1;
if (bigit_a > bigit_b) return +1;
// Otherwise they are equal up to this digit. Try the next digit.
}
return 0;
}
int Bignum::PlusCompare(const Bignum& a, const Bignum& b, const Bignum& c) {
ASSERT(a.IsClamped());
ASSERT(b.IsClamped());
ASSERT(c.IsClamped());
if (a.BigitLength() < b.BigitLength()) {
return PlusCompare(b, a, c);
}
if (a.BigitLength() + 1 < c.BigitLength()) return -1;
if (a.BigitLength() > c.BigitLength()) return +1;
// The exponent encodes 0-bigits. So if there are more 0-digits in 'a' than
// 'b' has digits, then the bigit-length of 'a'+'b' must be equal to the one
// of 'a'.
if (a.exponent_ >= b.BigitLength() && a.BigitLength() < c.BigitLength()) {
return -1;
}
Chunk borrow = 0;
// Starting at min_exponent all digits are == 0. So no need to compare them.
int min_exponent = Min(Min(a.exponent_, b.exponent_), c.exponent_);
for (int i = c.BigitLength() - 1; i >= min_exponent; --i) {
Chunk chunk_a = a.BigitAt(i);
Chunk chunk_b = b.BigitAt(i);
Chunk chunk_c = c.BigitAt(i);
Chunk sum = chunk_a + chunk_b;
if (sum > chunk_c + borrow) {
return +1;
} else {
borrow = chunk_c + borrow - sum;
if (borrow > 1) return -1;
borrow <<= kBigitSize;
}
}
if (borrow == 0) return 0;
return -1;
}
void Bignum::Clamp() {
while (used_digits_ > 0 && bigits_[used_digits_ - 1] == 0) {
used_digits_--;
}
if (used_digits_ == 0) {
// Zero.
exponent_ = 0;
}
}
bool Bignum::IsClamped() const {
return used_digits_ == 0 || bigits_[used_digits_ - 1] != 0;
}
void Bignum::Zero() {
for (int i = 0; i < used_digits_; ++i) {
bigits_[i] = 0;
}
used_digits_ = 0;
exponent_ = 0;
}
void Bignum::Align(const Bignum& other) {
if (exponent_ > other.exponent_) {
// If "X" represents a "hidden" digit (by the exponent) then we are in the
// following case (a == this, b == other):
// a: aaaaaaXXXX or a: aaaaaXXX
// b: bbbbbbX b: bbbbbbbbXX
// We replace some of the hidden digits (X) of a with 0 digits.
// a: aaaaaa000X or a: aaaaa0XX
int zero_digits = exponent_ - other.exponent_;
EnsureCapacity(used_digits_ + zero_digits);
for (int i = used_digits_ - 1; i >= 0; --i) {
bigits_[i + zero_digits] = bigits_[i];
}
for (int i = 0; i < zero_digits; ++i) {
bigits_[i] = 0;
}
used_digits_ += zero_digits;
exponent_ -= zero_digits;
ASSERT(used_digits_ >= 0);
ASSERT(exponent_ >= 0);
}
}
void Bignum::BigitsShiftLeft(int shift_amount) {
ASSERT(shift_amount < kBigitSize);
ASSERT(shift_amount >= 0);
Chunk carry = 0;
for (int i = 0; i < used_digits_; ++i) {
Chunk new_carry = bigits_[i] >> (kBigitSize - shift_amount);
bigits_[i] = ((bigits_[i] << shift_amount) + carry) & kBigitMask;
carry = new_carry;
}
if (carry != 0) {
bigits_[used_digits_] = carry;
used_digits_++;
}
}
void Bignum::SubtractTimes(const Bignum& other, int factor) {
ASSERT(exponent_ <= other.exponent_);
if (factor < 3) {
for (int i = 0; i < factor; ++i) {
SubtractBignum(other);
}
return;
}
Chunk borrow = 0;
int exponent_diff = other.exponent_ - exponent_;
for (int i = 0; i < other.used_digits_; ++i) {
DoubleChunk product = static_cast<DoubleChunk>(factor) * other.bigits_[i];
DoubleChunk remove = borrow + product;
Chunk difference = bigits_[i + exponent_diff] - (remove & kBigitMask);
bigits_[i + exponent_diff] = difference & kBigitMask;
borrow = static_cast<Chunk>((difference >> (kChunkSize - 1)) +
(remove >> kBigitSize));
}
for (int i = other.used_digits_ + exponent_diff; i < used_digits_; ++i) {
if (borrow == 0) return;
Chunk difference = bigits_[i] - borrow;
bigits_[i] = difference & kBigitMask;
borrow = difference >> (kChunkSize - 1);
}
Clamp();
}
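// Note on the borrow handling above (explanatory comment, not from the
// upstream file): 'difference' is an unsigned 32-bit subtraction of two
// values that are both smaller than 2^kBigitSize. When it underflows it
// wraps around to a value whose top bit is set, so
// 'difference >> (kChunkSize - 1)' is exactly the 0/1 borrow owed to the
// next bigit, while 'remove >> kBigitSize' carries the high part of
// factor * bigit forward. For example 5 - 7 wraps to 0xFFFFFFFE: the stored
// bigit becomes 0xFFFFFFFE & kBigitMask = 2^28 - 2 and a borrow of 1 is
// charged to the next, more significant bigit.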
} // namespace double_conversion

View File

@ -0,0 +1,145 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef DOUBLE_CONVERSION_BIGNUM_H_
#define DOUBLE_CONVERSION_BIGNUM_H_
#include "utils.h"
namespace double_conversion {
class Bignum {
public:
// 3584 = 128 * 28. We can represent 2^3584 > 10^1000 accurately.
// This bignum can encode much bigger numbers, since it contains an
// exponent.
static const int kMaxSignificantBits = 3584;
Bignum();
void AssignUInt16(uint16_t value);
void AssignUInt64(uint64_t value);
void AssignBignum(const Bignum& other);
void AssignDecimalString(Vector<const char> value);
void AssignHexString(Vector<const char> value);
void AssignPowerUInt16(uint16_t base, int exponent);
void AddUInt16(uint16_t operand);
void AddUInt64(uint64_t operand);
void AddBignum(const Bignum& other);
// Precondition: this >= other.
void SubtractBignum(const Bignum& other);
void Square();
void ShiftLeft(int shift_amount);
void MultiplyByUInt32(uint32_t factor);
void MultiplyByUInt64(uint64_t factor);
void MultiplyByPowerOfTen(int exponent);
void Times10() { return MultiplyByUInt32(10); }
// Pseudocode:
// int result = this / other;
// this = this % other;
// In the worst case this function is in O(this/other).
uint16_t DivideModuloIntBignum(const Bignum& other);
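// Illustrative sketch (not part of the upstream header): a typical caller
// extracts one decimal digit per iteration, roughly the way the library's
// bignum-dtoa does it:
//
//   uint16_t digit = numerator.DivideModuloIntBignum(denominator);
//   buffer[pos++] = static_cast<char>('0' + digit);  // digit is in [0..9]
//   numerator.Times10();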
bool ToHexString(char* buffer, int buffer_size) const;
// Returns
// -1 if a < b,
// 0 if a == b, and
// +1 if a > b.
static int Compare(const Bignum& a, const Bignum& b);
static bool Equal(const Bignum& a, const Bignum& b) {
return Compare(a, b) == 0;
}
static bool LessEqual(const Bignum& a, const Bignum& b) {
return Compare(a, b) <= 0;
}
static bool Less(const Bignum& a, const Bignum& b) {
return Compare(a, b) < 0;
}
// Returns Compare(a + b, c);
static int PlusCompare(const Bignum& a, const Bignum& b, const Bignum& c);
// Returns a + b == c
static bool PlusEqual(const Bignum& a, const Bignum& b, const Bignum& c) {
return PlusCompare(a, b, c) == 0;
}
// Returns a + b <= c
static bool PlusLessEqual(const Bignum& a, const Bignum& b, const Bignum& c) {
return PlusCompare(a, b, c) <= 0;
}
// Returns a + b < c
static bool PlusLess(const Bignum& a, const Bignum& b, const Bignum& c) {
return PlusCompare(a, b, c) < 0;
}
private:
typedef uint32_t Chunk;
typedef uint64_t DoubleChunk;
static const int kChunkSize = sizeof(Chunk) * 8;
static const int kDoubleChunkSize = sizeof(DoubleChunk) * 8;
// With a bigit size of 28 we lose some bits, but a double still fits easily
// into two chunks, and more importantly we can use the Comba multiplication.
static const int kBigitSize = 28;
static const Chunk kBigitMask = (1 << kBigitSize) - 1;
// Every instance allocates kBigitCapacity chunks on the stack. Bignums cannot
// grow. There are no checks if the stack-allocated space is sufficient.
static const int kBigitCapacity = kMaxSignificantBits / kBigitSize;
void EnsureCapacity(int size) {
if (size > kBigitCapacity) {
UNREACHABLE();
}
}
void Align(const Bignum& other);
void Clamp();
bool IsClamped() const;
void Zero();
// Requires this to have enough capacity (no tests done).
// Updates used_digits_ if necessary.
// shift_amount must be < kBigitSize.
void BigitsShiftLeft(int shift_amount);
// BigitLength includes the "hidden" digits encoded in the exponent.
int BigitLength() const { return used_digits_ + exponent_; }
Chunk BigitAt(int index) const;
void SubtractTimes(const Bignum& other, int factor);
Chunk bigits_buffer_[kBigitCapacity];
// A vector backed by bigits_buffer_. This way accesses to the array are
// checked for out-of-bounds errors.
Vector<Chunk> bigits_;
int used_digits_;
// The Bignum's value equals value(bigits_) * 2^(exponent_ * kBigitSize).
int exponent_;
DISALLOW_COPY_AND_ASSIGN(Bignum);
};
} // namespace double_conversion
#endif // DOUBLE_CONVERSION_BIGNUM_H_

View File

@ -0,0 +1,176 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdarg.h>
#include <limits.h>
#include <math.h>
#include "utils.h"
#include "cached-powers.h"
namespace double_conversion {
struct CachedPower {
uint64_t significand;
int16_t binary_exponent;
int16_t decimal_exponent;
};
static const CachedPower kCachedPowers[] = {
{UINT64_2PART_C(0xfa8fd5a0, 081c0288), -1220, -348},
{UINT64_2PART_C(0xbaaee17f, a23ebf76), -1193, -340},
{UINT64_2PART_C(0x8b16fb20, 3055ac76), -1166, -332},
{UINT64_2PART_C(0xcf42894a, 5dce35ea), -1140, -324},
{UINT64_2PART_C(0x9a6bb0aa, 55653b2d), -1113, -316},
{UINT64_2PART_C(0xe61acf03, 3d1a45df), -1087, -308},
{UINT64_2PART_C(0xab70fe17, c79ac6ca), -1060, -300},
{UINT64_2PART_C(0xff77b1fc, bebcdc4f), -1034, -292},
{UINT64_2PART_C(0xbe5691ef, 416bd60c), -1007, -284},
{UINT64_2PART_C(0x8dd01fad, 907ffc3c), -980, -276},
{UINT64_2PART_C(0xd3515c28, 31559a83), -954, -268},
{UINT64_2PART_C(0x9d71ac8f, ada6c9b5), -927, -260},
{UINT64_2PART_C(0xea9c2277, 23ee8bcb), -901, -252},
{UINT64_2PART_C(0xaecc4991, 4078536d), -874, -244},
{UINT64_2PART_C(0x823c1279, 5db6ce57), -847, -236},
{UINT64_2PART_C(0xc2109436, 4dfb5637), -821, -228},
{UINT64_2PART_C(0x9096ea6f, 3848984f), -794, -220},
{UINT64_2PART_C(0xd77485cb, 25823ac7), -768, -212},
{UINT64_2PART_C(0xa086cfcd, 97bf97f4), -741, -204},
{UINT64_2PART_C(0xef340a98, 172aace5), -715, -196},
{UINT64_2PART_C(0xb23867fb, 2a35b28e), -688, -188},
{UINT64_2PART_C(0x84c8d4df, d2c63f3b), -661, -180},
{UINT64_2PART_C(0xc5dd4427, 1ad3cdba), -635, -172},
{UINT64_2PART_C(0x936b9fce, bb25c996), -608, -164},
{UINT64_2PART_C(0xdbac6c24, 7d62a584), -582, -156},
{UINT64_2PART_C(0xa3ab6658, 0d5fdaf6), -555, -148},
{UINT64_2PART_C(0xf3e2f893, dec3f126), -529, -140},
{UINT64_2PART_C(0xb5b5ada8, aaff80b8), -502, -132},
{UINT64_2PART_C(0x87625f05, 6c7c4a8b), -475, -124},
{UINT64_2PART_C(0xc9bcff60, 34c13053), -449, -116},
{UINT64_2PART_C(0x964e858c, 91ba2655), -422, -108},
{UINT64_2PART_C(0xdff97724, 70297ebd), -396, -100},
{UINT64_2PART_C(0xa6dfbd9f, b8e5b88f), -369, -92},
{UINT64_2PART_C(0xf8a95fcf, 88747d94), -343, -84},
{UINT64_2PART_C(0xb9447093, 8fa89bcf), -316, -76},
{UINT64_2PART_C(0x8a08f0f8, bf0f156b), -289, -68},
{UINT64_2PART_C(0xcdb02555, 653131b6), -263, -60},
{UINT64_2PART_C(0x993fe2c6, d07b7fac), -236, -52},
{UINT64_2PART_C(0xe45c10c4, 2a2b3b06), -210, -44},
{UINT64_2PART_C(0xaa242499, 697392d3), -183, -36},
{UINT64_2PART_C(0xfd87b5f2, 8300ca0e), -157, -28},
{UINT64_2PART_C(0xbce50864, 92111aeb), -130, -20},
{UINT64_2PART_C(0x8cbccc09, 6f5088cc), -103, -12},
{UINT64_2PART_C(0xd1b71758, e219652c), -77, -4},
{UINT64_2PART_C(0x9c400000, 00000000), -50, 4},
{UINT64_2PART_C(0xe8d4a510, 00000000), -24, 12},
{UINT64_2PART_C(0xad78ebc5, ac620000), 3, 20},
{UINT64_2PART_C(0x813f3978, f8940984), 30, 28},
{UINT64_2PART_C(0xc097ce7b, c90715b3), 56, 36},
{UINT64_2PART_C(0x8f7e32ce, 7bea5c70), 83, 44},
{UINT64_2PART_C(0xd5d238a4, abe98068), 109, 52},
{UINT64_2PART_C(0x9f4f2726, 179a2245), 136, 60},
{UINT64_2PART_C(0xed63a231, d4c4fb27), 162, 68},
{UINT64_2PART_C(0xb0de6538, 8cc8ada8), 189, 76},
{UINT64_2PART_C(0x83c7088e, 1aab65db), 216, 84},
{UINT64_2PART_C(0xc45d1df9, 42711d9a), 242, 92},
{UINT64_2PART_C(0x924d692c, a61be758), 269, 100},
{UINT64_2PART_C(0xda01ee64, 1a708dea), 295, 108},
{UINT64_2PART_C(0xa26da399, 9aef774a), 322, 116},
{UINT64_2PART_C(0xf209787b, b47d6b85), 348, 124},
{UINT64_2PART_C(0xb454e4a1, 79dd1877), 375, 132},
{UINT64_2PART_C(0x865b8692, 5b9bc5c2), 402, 140},
{UINT64_2PART_C(0xc83553c5, c8965d3d), 428, 148},
{UINT64_2PART_C(0x952ab45c, fa97a0b3), 455, 156},
{UINT64_2PART_C(0xde469fbd, 99a05fe3), 481, 164},
{UINT64_2PART_C(0xa59bc234, db398c25), 508, 172},
{UINT64_2PART_C(0xf6c69a72, a3989f5c), 534, 180},
{UINT64_2PART_C(0xb7dcbf53, 54e9bece), 561, 188},
{UINT64_2PART_C(0x88fcf317, f22241e2), 588, 196},
{UINT64_2PART_C(0xcc20ce9b, d35c78a5), 614, 204},
{UINT64_2PART_C(0x98165af3, 7b2153df), 641, 212},
{UINT64_2PART_C(0xe2a0b5dc, 971f303a), 667, 220},
{UINT64_2PART_C(0xa8d9d153, 5ce3b396), 694, 228},
{UINT64_2PART_C(0xfb9b7cd9, a4a7443c), 720, 236},
{UINT64_2PART_C(0xbb764c4c, a7a44410), 747, 244},
{UINT64_2PART_C(0x8bab8eef, b6409c1a), 774, 252},
{UINT64_2PART_C(0xd01fef10, a657842c), 800, 260},
{UINT64_2PART_C(0x9b10a4e5, e9913129), 827, 268},
{UINT64_2PART_C(0xe7109bfb, a19c0c9d), 853, 276},
{UINT64_2PART_C(0xac2820d9, 623bf429), 880, 284},
{UINT64_2PART_C(0x80444b5e, 7aa7cf85), 907, 292},
{UINT64_2PART_C(0xbf21e440, 03acdd2d), 933, 300},
{UINT64_2PART_C(0x8e679c2f, 5e44ff8f), 960, 308},
{UINT64_2PART_C(0xd433179d, 9c8cb841), 986, 316},
{UINT64_2PART_C(0x9e19db92, b4e31ba9), 1013, 324},
{UINT64_2PART_C(0xeb96bf6e, badf77d9), 1039, 332},
{UINT64_2PART_C(0xaf87023b, 9bf0ee6b), 1066, 340},
};
static const int kCachedPowersLength = ARRAY_SIZE(kCachedPowers);
static const int kCachedPowersOffset = 348; // -1 * the first decimal_exponent.
static const double kD_1_LOG2_10 = 0.30102999566398114; // 1 / lg(10)
// Difference between the decimal exponents in the table above.
const int PowersOfTenCache::kDecimalExponentDistance = 8;
const int PowersOfTenCache::kMinDecimalExponent = -348;
const int PowersOfTenCache::kMaxDecimalExponent = 340;
void PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
int min_exponent,
int max_exponent,
DiyFp* power,
int* decimal_exponent) {
int kQ = DiyFp::kSignificandSize;
double k = ceil((min_exponent + kQ - 1) * kD_1_LOG2_10);
int foo = kCachedPowersOffset;
int index =
(foo + static_cast<int>(k) - 1) / kDecimalExponentDistance + 1;
ASSERT(0 <= index && index < kCachedPowersLength);
CachedPower cached_power = kCachedPowers[index];
ASSERT(min_exponent <= cached_power.binary_exponent);
(void) max_exponent; // Mark variable as used.
ASSERT(cached_power.binary_exponent <= max_exponent);
*decimal_exponent = cached_power.decimal_exponent;
*power = DiyFp(cached_power.significand, cached_power.binary_exponent);
}
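// Worked example (explanatory comment, not from the upstream file): for
// doubles the fast-dtoa path asks for a power of ten whose binary exponent
// lies roughly in [-60, -32]. With min_exponent = -60 and kQ = 64:
//   k     = ceil((-60 + 64 - 1) * 0.30103) = ceil(0.903) = 1
//   index = (348 + 1 - 1) / 8 + 1 = 44
// kCachedPowers[44] is {0x9c400000 00000000, -50, 4}, i.e. 10^4 stored as
// 0x9c400000'00000000 * 2^-50, and -50 indeed lies inside [-60, -32].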
void PowersOfTenCache::GetCachedPowerForDecimalExponent(int requested_exponent,
DiyFp* power,
int* found_exponent) {
ASSERT(kMinDecimalExponent <= requested_exponent);
ASSERT(requested_exponent < kMaxDecimalExponent + kDecimalExponentDistance);
int index =
(requested_exponent + kCachedPowersOffset) / kDecimalExponentDistance;
CachedPower cached_power = kCachedPowers[index];
*power = DiyFp(cached_power.significand, cached_power.binary_exponent);
*found_exponent = cached_power.decimal_exponent;
ASSERT(*found_exponent <= requested_exponent);
ASSERT(requested_exponent < *found_exponent + kDecimalExponentDistance);
}
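// Worked example (explanatory comment, not from the upstream file): asking
// for requested_exponent = -5 gives index = (-5 + 348) / 8 = 42, so
// kCachedPowers[42] = {0x8cbccc09 6f5088cc, -103, -12} is returned and
// found_exponent = -12 satisfies -12 <= -5 < -12 + 8. The caller typically
// bridges the remaining gap by multiplying with the exact power 10^7.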
} // namespace double_conversion

View File

@ -0,0 +1,64 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef DOUBLE_CONVERSION_CACHED_POWERS_H_
#define DOUBLE_CONVERSION_CACHED_POWERS_H_
#include "diy-fp.h"
namespace double_conversion {
class PowersOfTenCache {
public:
// Not all powers of ten are cached. The decimal exponent of two neighboring
// cached numbers will differ by kDecimalExponentDistance.
static const int kDecimalExponentDistance;
static const int kMinDecimalExponent;
static const int kMaxDecimalExponent;
// Returns a cached power-of-ten with a binary exponent in the range
// [min_exponent; max_exponent] (boundaries included).
static void GetCachedPowerForBinaryExponentRange(int min_exponent,
int max_exponent,
DiyFp* power,
int* decimal_exponent);
// Returns a cached power of ten x ~= 10^k such that
// k <= decimal_exponent < k + kCachedPowersDecimalDistance.
// The given decimal_exponent must satisfy
// kMinDecimalExponent <= requested_exponent, and
// requested_exponent < kMaxDecimalExponent + kDecimalExponentDistance.
static void GetCachedPowerForDecimalExponent(int requested_exponent,
DiyFp* power,
int* found_exponent);
};
} // namespace double_conversion
#endif // DOUBLE_CONVERSION_CACHED_POWERS_H_

View File

@ -0,0 +1,57 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "diy-fp.h"
#include "utils.h"
namespace double_conversion {
void DiyFp::Multiply(const DiyFp& other) {
// Simply "emulates" a 128 bit multiplication.
// However: the resulting number only contains 64 bits. The least
// significant 64 bits are only used for rounding the most significant 64
// bits.
const uint64_t kM32 = 0xFFFFFFFFU;
uint64_t a = f_ >> 32;
uint64_t b = f_ & kM32;
uint64_t c = other.f_ >> 32;
uint64_t d = other.f_ & kM32;
uint64_t ac = a * c;
uint64_t bc = b * c;
uint64_t ad = a * d;
uint64_t bd = b * d;
uint64_t tmp = (bd >> 32) + (ad & kM32) + (bc & kM32);
// By adding 1U << 31 to tmp we round the final result.
// Halfway cases are rounded up.
tmp += 1U << 31;
uint64_t result_f = ac + (ad >> 32) + (bc >> 32) + (tmp >> 32);
e_ += other.e_ + 64;
f_ = result_f;
}
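// Illustrative sketch (not part of the upstream file): cross-checks the
// rounded high 64 bits produced by Multiply against a full 128-bit product.
// It assumes a compiler that provides the unsigned __int128 extension
// (e.g. GCC or Clang), hence the feature-test guard.
#if defined(__SIZEOF_INT128__)
static bool MultiplyMatchesFullProduct(uint64_t f1, int e1,
                                       uint64_t f2, int e2) {
  DiyFp product = DiyFp::Times(DiyFp(f1, e1), DiyFp(f2, e2));
  unsigned __int128 full =
      static_cast<unsigned __int128>(f1) * static_cast<unsigned __int128>(f2);
  // Multiply keeps the high 64 bits and rounds the discarded low half
  // (ties up), which equals adding 2^63 before dropping the low 64 bits.
  uint64_t expected = static_cast<uint64_t>(
      (full + (static_cast<unsigned __int128>(1) << 63)) >> 64);
  return product.f() == expected && product.e() == e1 + e2 + 64;
}
#endif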
} // namespace double_conversion

View File

@ -0,0 +1,118 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef DOUBLE_CONVERSION_DIY_FP_H_
#define DOUBLE_CONVERSION_DIY_FP_H_
#include "utils.h"
namespace double_conversion {
// This "Do It Yourself Floating Point" class implements a floating-point number
// with a uint64 significand and an int exponent. Normalized DiyFp numbers will
// have the most significant bit of the significand set.
// Multiplication and Subtraction do not normalize their results.
// DiyFp are not designed to contain special doubles (NaN and Infinity).
class DiyFp {
public:
static const int kSignificandSize = 64;
DiyFp() : f_(0), e_(0) {}
DiyFp(uint64_t f, int e) : f_(f), e_(e) {}
// this = this - other.
// The exponents of both numbers must be the same and the significand of this
// must be bigger than the significand of other.
// The result will not be normalized.
void Subtract(const DiyFp& other) {
ASSERT(e_ == other.e_);
ASSERT(f_ >= other.f_);
f_ -= other.f_;
}
// Returns a - b.
// The exponents of both numbers must be the same and this must be bigger
// than other. The result will not be normalized.
static DiyFp Minus(const DiyFp& a, const DiyFp& b) {
DiyFp result = a;
result.Subtract(b);
return result;
}
// this = this * other.
void Multiply(const DiyFp& other);
// returns a * b;
static DiyFp Times(const DiyFp& a, const DiyFp& b) {
DiyFp result = a;
result.Multiply(b);
return result;
}
void Normalize() {
ASSERT(f_ != 0);
uint64_t f = f_;
int e = e_;
// This method is mainly called for normalizing boundaries. In general
// boundaries need to be shifted by 10 bits. We thus optimize for this case.
const uint64_t k10MSBits = UINT64_2PART_C(0xFFC00000, 00000000);
while ((f & k10MSBits) == 0) {
f <<= 10;
e -= 10;
}
while ((f & kUint64MSB) == 0) {
f <<= 1;
e--;
}
f_ = f;
e_ = e;
}
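// Worked example (explanatory comment, not from the upstream header):
// DiyFp(5, 0) normalizes to f = 0xA000000000000000, e = -61. The first loop
// shifts by 10 bits six times, the second loop by one final bit, so the most
// significant bit of the significand ends up set while f * 2^e still
// equals 5.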
static DiyFp Normalize(const DiyFp& a) {
DiyFp result = a;
result.Normalize();
return result;
}
uint64_t f() const { return f_; }
int e() const { return e_; }
void set_f(uint64_t new_value) { f_ = new_value; }
void set_e(int new_value) { e_ = new_value; }
private:
static const uint64_t kUint64MSB = UINT64_2PART_C(0x80000000, 00000000);
uint64_t f_;
int e_;
};
} // namespace double_conversion
#endif // DOUBLE_CONVERSION_DIY_FP_H_

View File

@ -0,0 +1,910 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <limits.h>
#include <math.h>
#include "double-conversion.h"
#include "bignum-dtoa.h"
#include "fast-dtoa.h"
#include "fixed-dtoa.h"
#include "ieee.h"
#include "strtod.h"
#include "utils.h"
namespace double_conversion {
const DoubleToStringConverter& DoubleToStringConverter::EcmaScriptConverter() {
int flags = UNIQUE_ZERO | EMIT_POSITIVE_EXPONENT_SIGN;
static DoubleToStringConverter converter(flags,
"Infinity",
"NaN",
'e',
-6, 21,
6, 0);
return converter;
}
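// Illustrative sketch (not part of the upstream file): typical use of the
// EcmaScript converter above, writing into a caller-supplied buffer through
// the StringBuilder helper declared in utils.h.
static bool EcmaScriptConverterExample() {
  const DoubleToStringConverter& converter =
      DoubleToStringConverter::EcmaScriptConverter();
  char buffer[64];
  StringBuilder builder(buffer, static_cast<int>(sizeof(buffer)));
  bool ok = converter.ToShortest(0.000001, &builder);  // Produces "0.000001".
  // Finalize() null-terminates the buffer and hands it back to the caller.
  return ok && builder.Finalize() != NULL;
}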
bool DoubleToStringConverter::HandleSpecialValues(
double value,
StringBuilder* result_builder) const {
Double double_inspect(value);
if (double_inspect.IsInfinite()) {
if (infinity_symbol_ == NULL) return false;
if (value < 0) {
result_builder->AddCharacter('-');
}
result_builder->AddString(infinity_symbol_);
return true;
}
if (double_inspect.IsNan()) {
if (nan_symbol_ == NULL) return false;
result_builder->AddString(nan_symbol_);
return true;
}
return false;
}
void DoubleToStringConverter::CreateExponentialRepresentation(
const char* decimal_digits,
int length,
int exponent,
StringBuilder* result_builder) const {
ASSERT(length != 0);
result_builder->AddCharacter(decimal_digits[0]);
if (length != 1) {
result_builder->AddCharacter('.');
result_builder->AddSubstring(&decimal_digits[1], length-1);
}
result_builder->AddCharacter(exponent_character_);
if (exponent < 0) {
result_builder->AddCharacter('-');
exponent = -exponent;
} else {
if ((flags_ & EMIT_POSITIVE_EXPONENT_SIGN) != 0) {
result_builder->AddCharacter('+');
}
}
if (exponent == 0) {
result_builder->AddCharacter('0');
return;
}
ASSERT(exponent < 1e4);
const int kMaxExponentLength = 5;
char buffer[kMaxExponentLength + 1];
buffer[kMaxExponentLength] = '\0';
int first_char_pos = kMaxExponentLength;
while (exponent > 0) {
buffer[--first_char_pos] = '0' + (exponent % 10);
exponent /= 10;
}
result_builder->AddSubstring(&buffer[first_char_pos],
kMaxExponentLength - first_char_pos);
}
void DoubleToStringConverter::CreateDecimalRepresentation(
const char* decimal_digits,
int length,
int decimal_point,
int digits_after_point,
StringBuilder* result_builder) const {
// Create a representation that is padded with zeros if needed.
if (decimal_point <= 0) {
// "0.00000decimal_rep".
result_builder->AddCharacter('0');
if (digits_after_point > 0) {
result_builder->AddCharacter('.');
result_builder->AddPadding('0', -decimal_point);
ASSERT(length <= digits_after_point - (-decimal_point));
result_builder->AddSubstring(decimal_digits, length);
int remaining_digits = digits_after_point - (-decimal_point) - length;
result_builder->AddPadding('0', remaining_digits);
}
} else if (decimal_point >= length) {
// "decimal_rep0000.00000" or "decimal_rep.0000"
result_builder->AddSubstring(decimal_digits, length);
result_builder->AddPadding('0', decimal_point - length);
if (digits_after_point > 0) {
result_builder->AddCharacter('.');
result_builder->AddPadding('0', digits_after_point);
}
} else {
// "decima.l_rep000"
ASSERT(digits_after_point > 0);
result_builder->AddSubstring(decimal_digits, decimal_point);
result_builder->AddCharacter('.');
ASSERT(length - decimal_point <= digits_after_point);
result_builder->AddSubstring(&decimal_digits[decimal_point],
length - decimal_point);
int remaining_digits = digits_after_point - (length - decimal_point);
result_builder->AddPadding('0', remaining_digits);
}
if (digits_after_point == 0) {
if ((flags_ & EMIT_TRAILING_DECIMAL_POINT) != 0) {
result_builder->AddCharacter('.');
}
if ((flags_ & EMIT_TRAILING_ZERO_AFTER_POINT) != 0) {
result_builder->AddCharacter('0');
}
}
}
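// Worked examples for the three branches above (explanatory comment, not
// from the upstream file), all with decimal_digits = "123":
//   decimal_point = -2, digits_after_point = 5  ->  "0.00123"
//   decimal_point =  5, digits_after_point = 1  ->  "12300.0"
//   decimal_point =  1, digits_after_point = 2  ->  "1.23"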
bool DoubleToStringConverter::ToShortestIeeeNumber(
double value,
StringBuilder* result_builder,
DoubleToStringConverter::DtoaMode mode) const {
ASSERT(mode == SHORTEST || mode == SHORTEST_SINGLE);
if (Double(value).IsSpecial()) {
return HandleSpecialValues(value, result_builder);
}
int decimal_point;
bool sign;
const int kDecimalRepCapacity = kBase10MaximalLength + 1;
char decimal_rep[kDecimalRepCapacity];
int decimal_rep_length;
DoubleToAscii(value, mode, 0, decimal_rep, kDecimalRepCapacity,
&sign, &decimal_rep_length, &decimal_point);
bool unique_zero = (flags_ & UNIQUE_ZERO) != 0;
if (sign && (value != 0.0 || !unique_zero)) {
result_builder->AddCharacter('-');
}
int exponent = decimal_point - 1;
if ((decimal_in_shortest_low_ <= exponent) &&
(exponent < decimal_in_shortest_high_)) {
CreateDecimalRepresentation(decimal_rep, decimal_rep_length,
decimal_point,
Max(0, decimal_rep_length - decimal_point),
result_builder);
} else {
CreateExponentialRepresentation(decimal_rep, decimal_rep_length, exponent,
result_builder);
}
return true;
}
bool DoubleToStringConverter::ToFixed(double value,
int requested_digits,
StringBuilder* result_builder) const {
ASSERT(kMaxFixedDigitsBeforePoint == 60);
const double kFirstNonFixed = 1e60;
if (Double(value).IsSpecial()) {
return HandleSpecialValues(value, result_builder);
}
if (requested_digits > kMaxFixedDigitsAfterPoint) return false;
if (value >= kFirstNonFixed || value <= -kFirstNonFixed) return false;
// Find a sufficiently precise decimal representation of n.
int decimal_point;
bool sign;
// Add space for the '\0' byte.
const int kDecimalRepCapacity =
kMaxFixedDigitsBeforePoint + kMaxFixedDigitsAfterPoint + 1;
char decimal_rep[kDecimalRepCapacity];
int decimal_rep_length;
DoubleToAscii(value, FIXED, requested_digits,
decimal_rep, kDecimalRepCapacity,
&sign, &decimal_rep_length, &decimal_point);
bool unique_zero = ((flags_ & UNIQUE_ZERO) != 0);
if (sign && (value != 0.0 || !unique_zero)) {
result_builder->AddCharacter('-');
}
CreateDecimalRepresentation(decimal_rep, decimal_rep_length, decimal_point,
requested_digits, result_builder);
return true;
}
bool DoubleToStringConverter::ToExponential(
double value,
int requested_digits,
StringBuilder* result_builder) const {
if (Double(value).IsSpecial()) {
return HandleSpecialValues(value, result_builder);
}
if (requested_digits < -1) return false;
if (requested_digits > kMaxExponentialDigits) return false;
int decimal_point;
bool sign;
// Add space for digit before the decimal point and the '\0' character.
const int kDecimalRepCapacity = kMaxExponentialDigits + 2;
ASSERT(kDecimalRepCapacity > kBase10MaximalLength);
char decimal_rep[kDecimalRepCapacity];
int decimal_rep_length;
if (requested_digits == -1) {
DoubleToAscii(value, SHORTEST, 0,
decimal_rep, kDecimalRepCapacity,
&sign, &decimal_rep_length, &decimal_point);
} else {
DoubleToAscii(value, PRECISION, requested_digits + 1,
decimal_rep, kDecimalRepCapacity,
&sign, &decimal_rep_length, &decimal_point);
ASSERT(decimal_rep_length <= requested_digits + 1);
for (int i = decimal_rep_length; i < requested_digits + 1; ++i) {
decimal_rep[i] = '0';
}
decimal_rep_length = requested_digits + 1;
}
bool unique_zero = ((flags_ & UNIQUE_ZERO) != 0);
if (sign && (value != 0.0 || !unique_zero)) {
result_builder->AddCharacter('-');
}
int exponent = decimal_point - 1;
CreateExponentialRepresentation(decimal_rep,
decimal_rep_length,
exponent,
result_builder);
return true;
}
bool DoubleToStringConverter::ToPrecision(double value,
int precision,
StringBuilder* result_builder) const {
if (Double(value).IsSpecial()) {
return HandleSpecialValues(value, result_builder);
}
if (precision < kMinPrecisionDigits || precision > kMaxPrecisionDigits) {
return false;
}
// Find a sufficiently precise decimal representation of n.
int decimal_point;
bool sign;
// Add one for the terminating null character.
const int kDecimalRepCapacity = kMaxPrecisionDigits + 1;
char decimal_rep[kDecimalRepCapacity];
int decimal_rep_length;
DoubleToAscii(value, PRECISION, precision,
decimal_rep, kDecimalRepCapacity,
&sign, &decimal_rep_length, &decimal_point);
ASSERT(decimal_rep_length <= precision);
bool unique_zero = ((flags_ & UNIQUE_ZERO) != 0);
if (sign && (value != 0.0 || !unique_zero)) {
result_builder->AddCharacter('-');
}
// The exponent if we print the number as x.xxeyyy. That is with the
// decimal point after the first digit.
int exponent = decimal_point - 1;
int extra_zero = ((flags_ & EMIT_TRAILING_ZERO_AFTER_POINT) != 0) ? 1 : 0;
if ((-decimal_point + 1 > max_leading_padding_zeroes_in_precision_mode_) ||
(decimal_point - precision + extra_zero >
max_trailing_padding_zeroes_in_precision_mode_)) {
// Fill buffer to contain 'precision' digits.
// Usually the buffer is already at the correct length, but 'DoubleToAscii'
// is allowed to return fewer characters.
for (int i = decimal_rep_length; i < precision; ++i) {
decimal_rep[i] = '0';
}
CreateExponentialRepresentation(decimal_rep,
precision,
exponent,
result_builder);
} else {
CreateDecimalRepresentation(decimal_rep, decimal_rep_length, decimal_point,
Max(0, precision - decimal_point),
result_builder);
}
return true;
}
static BignumDtoaMode DtoaToBignumDtoaMode(
DoubleToStringConverter::DtoaMode dtoa_mode) {
switch (dtoa_mode) {
case DoubleToStringConverter::SHORTEST: return BIGNUM_DTOA_SHORTEST;
case DoubleToStringConverter::SHORTEST_SINGLE:
return BIGNUM_DTOA_SHORTEST_SINGLE;
case DoubleToStringConverter::FIXED: return BIGNUM_DTOA_FIXED;
case DoubleToStringConverter::PRECISION: return BIGNUM_DTOA_PRECISION;
default:
UNREACHABLE();
}
}
void DoubleToStringConverter::DoubleToAscii(double v,
DtoaMode mode,
int requested_digits,
char* buffer,
int buffer_length,
bool* sign,
int* length,
int* point) {
Vector<char> vector(buffer, buffer_length);
ASSERT(!Double(v).IsSpecial());
ASSERT(mode == SHORTEST || mode == SHORTEST_SINGLE || requested_digits >= 0);
if (Double(v).Sign() < 0) {
*sign = true;
v = -v;
} else {
*sign = false;
}
if (mode == PRECISION && requested_digits == 0) {
vector[0] = '\0';
*length = 0;
return;
}
if (v == 0) {
vector[0] = '0';
vector[1] = '\0';
*length = 1;
*point = 1;
return;
}
bool fast_worked;
switch (mode) {
case SHORTEST:
fast_worked = FastDtoa(v, FAST_DTOA_SHORTEST, 0, vector, length, point);
break;
case SHORTEST_SINGLE:
fast_worked = FastDtoa(v, FAST_DTOA_SHORTEST_SINGLE, 0,
vector, length, point);
break;
case FIXED:
fast_worked = FastFixedDtoa(v, requested_digits, vector, length, point);
break;
case PRECISION:
fast_worked = FastDtoa(v, FAST_DTOA_PRECISION, requested_digits,
vector, length, point);
break;
default:
fast_worked = false;
UNREACHABLE();
}
if (fast_worked) return;
// If the fast dtoa didn't succeed use the slower bignum version.
BignumDtoaMode bignum_mode = DtoaToBignumDtoaMode(mode);
BignumDtoa(v, bignum_mode, requested_digits, vector, length, point);
vector[*length] = '\0';
}
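// Worked example (explanatory comment, not from the upstream file):
//   DoubleToAscii(-3.1415, SHORTEST, 0, buffer, size, &sign, &length, &point)
// leaves buffer = "31415", sign = true, length = 5 and point = 1; the sign
// and the decimal-point position are reported separately so the callers
// above can lay out fixed, exponential, or precision output themselves.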
// Consumes the given substring from the iterator.
// Returns false if the substring does not match.
static bool ConsumeSubString(const char** current,
const char* end,
const char* substring) {
ASSERT(**current == *substring);
for (substring++; *substring != '\0'; substring++) {
++*current;
if (*current == end || **current != *substring) return false;
}
++*current;
return true;
}
// Maximum number of significant digits in decimal representation.
// The longest possible double in decimal representation is
// (2^53 - 1) * 2 ^ -1074 that is (2 ^ 53 - 1) * 5 ^ 1074 / 10 ^ 1074
// (768 digits). If we parse a number whose first digits are equal to a
// mean of 2 adjacent doubles (that could have up to 769 digits) the result
// must be rounded to the bigger one unless the tail consists of zeros, so
// we don't need to preserve all the digits.
const int kMaxSignificantDigits = 772;
// Returns true if a non-space character was found and false if the end was reached.
static inline bool AdvanceToNonspace(const char** current, const char* end) {
while (*current != end) {
if (**current != ' ') return true;
++*current;
}
return false;
}
static bool isDigit(int x, int radix) {
return (x >= '0' && x <= '9' && x < '0' + radix)
|| (radix > 10 && x >= 'a' && x < 'a' + radix - 10)
|| (radix > 10 && x >= 'A' && x < 'A' + radix - 10);
}
static double SignedZero(bool sign) {
return sign ? -0.0 : 0.0;
}
// Returns true if 'c' is a decimal digit that is valid for the given radix.
//
// The function is small and could be inlined, but VS2012 emitted a warning
// because it constant-propagated the radix and concluded that the last
// condition was always true. By moving it into a separate function the
// compiler wouldn't warn anymore.
static bool IsDecimalDigitForRadix(int c, int radix) {
return '0' <= c && c <= '9' && (c - '0') < radix;
}
// Returns true if 'c' is a character digit that is valid for the given radix.
// The 'a_character' should be 'a' or 'A'.
//
// The function is small and could be inlined, but VS2012 emitted a warning
// because it constant-propagated the radix and concluded that the first
// condition was always false. By moving it into a separate function the
// compiler wouldn't warn anymore.
static bool IsCharacterDigitForRadix(int c, int radix, char a_character) {
return radix > 10 && c >= a_character && c < a_character + radix - 10;
}
// Parsing integers with radix 2, 4, 8, 16, 32. Assumes current != end.
template <int radix_log_2>
static double RadixStringToIeee(const char* current,
const char* end,
bool sign,
bool allow_trailing_junk,
double junk_string_value,
bool read_as_double,
const char** trailing_pointer) {
ASSERT(current != end);
const int kDoubleSize = Double::kSignificandSize;
const int kSingleSize = Single::kSignificandSize;
const int kSignificandSize = read_as_double? kDoubleSize: kSingleSize;
// Skip leading 0s.
while (*current == '0') {
++current;
if (current == end) {
*trailing_pointer = end;
return SignedZero(sign);
}
}
int64_t number = 0;
int exponent = 0;
const int radix = (1 << radix_log_2);
do {
int digit;
if (IsDecimalDigitForRadix(*current, radix)) {
digit = static_cast<char>(*current) - '0';
} else if (IsCharacterDigitForRadix(*current, radix, 'a')) {
digit = static_cast<char>(*current) - 'a' + 10;
} else if (IsCharacterDigitForRadix(*current, radix, 'A')) {
digit = static_cast<char>(*current) - 'A' + 10;
} else {
if (allow_trailing_junk || !AdvanceToNonspace(&current, end)) {
break;
} else {
return junk_string_value;
}
}
number = number * radix + digit;
int overflow = static_cast<int>(number >> kSignificandSize);
if (overflow != 0) {
// Overflow occurred. Need to determine which direction to round the
// result.
int overflow_bits_count = 1;
while (overflow > 1) {
overflow_bits_count++;
overflow >>= 1;
}
int dropped_bits_mask = ((1 << overflow_bits_count) - 1);
int dropped_bits = static_cast<int>(number) & dropped_bits_mask;
number >>= overflow_bits_count;
exponent = overflow_bits_count;
bool zero_tail = true;
for (;;) {
++current;
if (current == end || !isDigit(*current, radix)) break;
zero_tail = zero_tail && *current == '0';
exponent += radix_log_2;
}
if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
return junk_string_value;
}
int middle_value = (1 << (overflow_bits_count - 1));
if (dropped_bits > middle_value) {
number++; // Rounding up.
} else if (dropped_bits == middle_value) {
// Round to even for consistency with decimals: half-way cases round
// up if the significant part is odd and down otherwise.
if ((number & 1) != 0 || !zero_tail) {
number++; // Rounding up.
}
}
// Rounding up may cause overflow.
if ((number & ((int64_t)1 << kSignificandSize)) != 0) {
exponent++;
number >>= 1;
}
break;
}
++current;
} while (current != end);
ASSERT(number < ((int64_t)1 << kSignificandSize));
ASSERT(static_cast<int64_t>(static_cast<double>(number)) == number);
*trailing_pointer = current;
if (exponent == 0) {
if (sign) {
if (number == 0) return -0.0;
number = -number;
}
return static_cast<double>(number);
}
ASSERT(number != 0);
return Double(DiyFp(number, exponent)).value();
}
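// Worked example of the overflow/rounding path above (explanatory comment,
// not from the upstream file): parsing "20000000000001" with radix 16 and
// read_as_double = true accumulates number = 2^53 + 1, one bit too wide for
// the 53-bit significand. overflow_bits_count is 1, the dropped bit equals
// the halfway value, the tail is empty, and the kept significand 2^52 is
// already even, so the value rounds down to 2^53 -- the same round-to-even
// answer the decimal path would give.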
double StringToDoubleConverter::StringToIeee(
const char* input,
int length,
int* processed_characters_count,
bool read_as_double) const {
const char* current = input;
const char* end = input + length;
*processed_characters_count = 0;
const bool allow_trailing_junk = (flags_ & ALLOW_TRAILING_JUNK) != 0;
const bool allow_leading_spaces = (flags_ & ALLOW_LEADING_SPACES) != 0;
const bool allow_trailing_spaces = (flags_ & ALLOW_TRAILING_SPACES) != 0;
const bool allow_spaces_after_sign = (flags_ & ALLOW_SPACES_AFTER_SIGN) != 0;
// To make sure that iterator dereferencing is valid the following
// convention is used:
// 1. Each '++current' statement is followed by check for equality to 'end'.
// 2. If AdvanceToNonspace returned false then current == end.
// 3. If 'current' becomes equal to 'end' the function returns or goes to
// 'parsing_done'.
// 4. 'current' is not dereferenced after the 'parsing_done' label.
// 5. Code before 'parsing_done' may rely on 'current != end'.
if (current == end) return empty_string_value_;
if (allow_leading_spaces || allow_trailing_spaces) {
if (!AdvanceToNonspace(&current, end)) {
*processed_characters_count = static_cast<int>(current - input);
return empty_string_value_;
}
if (!allow_leading_spaces && (input != current)) {
// No leading spaces allowed, but AdvanceToNonspace moved forward.
return junk_string_value_;
}
}
// The longest form of simplified number is: "-<significant digits>.1eXXX\0".
const int kBufferSize = kMaxSignificantDigits + 10;
char buffer[kBufferSize]; // NOLINT: size is known at compile time.
int buffer_pos = 0;
// Exponent will be adjusted if insignificant digits of the integer part
// or insignificant leading zeros of the fractional part are dropped.
int exponent = 0;
int significant_digits = 0;
int insignificant_digits = 0;
bool nonzero_digit_dropped = false;
bool sign = false;
if (*current == '+' || *current == '-') {
sign = (*current == '-');
++current;
const char* next_non_space = current;
// Skip following spaces (if allowed).
if (!AdvanceToNonspace(&next_non_space, end)) return junk_string_value_;
if (!allow_spaces_after_sign && (current != next_non_space)) {
return junk_string_value_;
}
current = next_non_space;
}
if (infinity_symbol_ != NULL) {
if (*current == infinity_symbol_[0]) {
if (!ConsumeSubString(&current, end, infinity_symbol_)) {
return junk_string_value_;
}
if (!(allow_trailing_spaces || allow_trailing_junk) && (current != end)) {
return junk_string_value_;
}
if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
return junk_string_value_;
}
ASSERT(buffer_pos == 0);
*processed_characters_count = static_cast<int>(current - input);
return sign ? -Double::Infinity() : Double::Infinity();
}
}
if (nan_symbol_ != NULL) {
if (*current == nan_symbol_[0]) {
if (!ConsumeSubString(&current, end, nan_symbol_)) {
return junk_string_value_;
}
if (!(allow_trailing_spaces || allow_trailing_junk) && (current != end)) {
return junk_string_value_;
}
if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
return junk_string_value_;
}
ASSERT(buffer_pos == 0);
*processed_characters_count = static_cast<int>(current - input);
return sign ? -Double::NaN() : Double::NaN();
}
}
bool leading_zero = false;
if (*current == '0') {
++current;
if (current == end) {
*processed_characters_count = static_cast<int>(current - input);
return SignedZero(sign);
}
leading_zero = true;
// It could be a hexadecimal value.
if ((flags_ & ALLOW_HEX) && (*current == 'x' || *current == 'X')) {
++current;
if (current == end || !isDigit(*current, 16)) {
return junk_string_value_; // "0x".
}
const char* tail_pointer = NULL;
double result = RadixStringToIeee<4>(current,
end,
sign,
allow_trailing_junk,
junk_string_value_,
read_as_double,
&tail_pointer);
if (tail_pointer != NULL) {
if (allow_trailing_spaces) AdvanceToNonspace(&tail_pointer, end);
*processed_characters_count = static_cast<int>(tail_pointer - input);
}
return result;
}
// Ignore leading zeros in the integer part.
while (*current == '0') {
++current;
if (current == end) {
*processed_characters_count = static_cast<int>(current - input);
return SignedZero(sign);
}
}
}
bool octal = leading_zero && (flags_ & ALLOW_OCTALS) != 0;
// Copy significant digits of the integer part (if any) to the buffer.
while (*current >= '0' && *current <= '9') {
if (significant_digits < kMaxSignificantDigits) {
ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos++] = static_cast<char>(*current);
significant_digits++;
// Will later check if it's an octal in the buffer.
} else {
insignificant_digits++; // Move the digit into the exponential part.
nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
}
octal = octal && *current < '8';
++current;
if (current == end) goto parsing_done;
}
if (significant_digits == 0) {
octal = false;
}
if (*current == '.') {
if (octal && !allow_trailing_junk) return junk_string_value_;
if (octal) goto parsing_done;
++current;
if (current == end) {
if (significant_digits == 0 && !leading_zero) {
return junk_string_value_;
} else {
goto parsing_done;
}
}
if (significant_digits == 0) {
// octal = false;
// Integer part consists of 0 or is absent. Significant digits start after
// leading zeros (if any).
while (*current == '0') {
++current;
if (current == end) {
*processed_characters_count = static_cast<int>(current - input);
return SignedZero(sign);
}
exponent--; // Move this 0 into the exponent.
}
}
// There is a fractional part.
// We don't emit a '.', but adjust the exponent instead.
while (*current >= '0' && *current <= '9') {
if (significant_digits < kMaxSignificantDigits) {
ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos++] = static_cast<char>(*current);
significant_digits++;
exponent--;
} else {
// Ignore insignificant digits in the fractional part.
nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
}
++current;
if (current == end) goto parsing_done;
}
}
if (!leading_zero && exponent == 0 && significant_digits == 0) {
// If leading_zero is true then the string contains zeros.
// If exponent < 0 then string was [+-]\.0*...
// If significant_digits != 0 the string is not equal to 0.
// Otherwise there are no digits in the string.
return junk_string_value_;
}
// Parse exponential part.
if (*current == 'e' || *current == 'E') {
if (octal && !allow_trailing_junk) return junk_string_value_;
if (octal) goto parsing_done;
++current;
if (current == end) {
if (allow_trailing_junk) {
goto parsing_done;
} else {
return junk_string_value_;
}
}
char sign = '+';
if (*current == '+' || *current == '-') {
sign = static_cast<char>(*current);
++current;
if (current == end) {
if (allow_trailing_junk) {
goto parsing_done;
} else {
return junk_string_value_;
}
}
}
if (current == end || *current < '0' || *current > '9') {
if (allow_trailing_junk) {
goto parsing_done;
} else {
return junk_string_value_;
}
}
const int max_exponent = INT_MAX / 2;
ASSERT(-max_exponent / 2 <= exponent && exponent <= max_exponent / 2);
int num = 0;
do {
// Check overflow.
int digit = *current - '0';
if (num >= max_exponent / 10
&& !(num == max_exponent / 10 && digit <= max_exponent % 10)) {
num = max_exponent;
} else {
num = num * 10 + digit;
}
++current;
} while (current != end && *current >= '0' && *current <= '9');
exponent += (sign == '-' ? -num : num);
}
if (!(allow_trailing_spaces || allow_trailing_junk) && (current != end)) {
return junk_string_value_;
}
if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
return junk_string_value_;
}
if (allow_trailing_spaces) {
AdvanceToNonspace(&current, end);
}
parsing_done:
exponent += insignificant_digits;
if (octal) {
double result;
const char* tail_pointer = NULL;
result = RadixStringToIeee<3>(buffer,
buffer + buffer_pos,
sign,
allow_trailing_junk,
junk_string_value_,
read_as_double,
&tail_pointer);
ASSERT(tail_pointer != NULL);
*processed_characters_count = static_cast<int>(current - input);
return result;
}
if (nonzero_digit_dropped) {
buffer[buffer_pos++] = '1';
exponent--;
}
ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos] = '\0';
double converted;
if (read_as_double) {
converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent);
} else {
converted = Strtof(Vector<const char>(buffer, buffer_pos), exponent);
}
*processed_characters_count = static_cast<int>(current - input);
return sign? -converted: converted;
}
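// Usage sketch (illustrative, not part of the upstream file; it assumes the
// public StringToDoubleConverter interface declared in double-conversion.h,
// which forwards to StringToIeee):
//
//   StringToDoubleConverter converter(StringToDoubleConverter::ALLOW_HEX,
//                                     0.0,            // empty_string_value
//                                     Double::NaN(),  // junk_string_value
//                                     NULL, NULL);    // no Infinity/NaN symbols
//   int processed;
//   double d = converter.StringToDouble("0x1A", 4, &processed);
//   // d == 26.0 and processed == 4.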
} // namespace double_conversion

View File

@ -0,0 +1,536 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef DOUBLE_CONVERSION_DOUBLE_CONVERSION_H_
#define DOUBLE_CONVERSION_DOUBLE_CONVERSION_H_
#include "utils.h"
namespace double_conversion {
class DoubleToStringConverter {
public:
// When calling ToFixed with a double > 10^kMaxFixedDigitsBeforePoint
// or a requested_digits parameter > kMaxFixedDigitsAfterPoint then the
// function returns false.
static const int kMaxFixedDigitsBeforePoint = 60;
static const int kMaxFixedDigitsAfterPoint = 60;
// When calling ToExponential with a requested_digits
// parameter > kMaxExponentialDigits then the function returns false.
static const int kMaxExponentialDigits = 120;
// When calling ToPrecision with a requested_digits
// parameter < kMinPrecisionDigits or requested_digits > kMaxPrecisionDigits
// then the function returns false.
static const int kMinPrecisionDigits = 1;
static const int kMaxPrecisionDigits = 120;
enum Flags {
NO_FLAGS = 0,
EMIT_POSITIVE_EXPONENT_SIGN = 1,
EMIT_TRAILING_DECIMAL_POINT = 2,
EMIT_TRAILING_ZERO_AFTER_POINT = 4,
UNIQUE_ZERO = 8
};
// Flags should be a bit-or combination of the possible Flags-enum.
// - NO_FLAGS: no special flags.
// - EMIT_POSITIVE_EXPONENT_SIGN: when the number is converted into exponent
// form, emits a '+' for positive exponents. Example: 1.2e+2.
// - EMIT_TRAILING_DECIMAL_POINT: when the input number is an integer and is
// converted into decimal format then a trailing decimal point is appended.
// Example: 2345.0 is converted to "2345.".
// - EMIT_TRAILING_ZERO_AFTER_POINT: in addition to a trailing decimal point
// emits a trailing '0'-character. This flag requires the
// EMIT_TRAILING_DECIMAL_POINT flag.
// Example: 2345.0 is converted to "2345.0".
// - UNIQUE_ZERO: "-0.0" is converted to "0.0".
//
// Infinity symbol and nan_symbol provide the string representation for these
// special values. If the string is NULL and the special value is encountered
// then the conversion functions return false.
//
// The exponent_character is used in exponential representations. It is
// usually 'e' or 'E'.
//
// When converting to the shortest representation the converter will
// represent input numbers in decimal format if they are in the interval
// [10^decimal_in_shortest_low; 10^decimal_in_shortest_high[
// (lower boundary included, greater boundary excluded).
// Example: with decimal_in_shortest_low = -6 and
// decimal_in_shortest_high = 21:
// ToShortest(0.000001) -> "0.000001"
// ToShortest(0.0000001) -> "1e-7"
// ToShortest(111111111111111111111.0) -> "111111111111111110000"
// ToShortest(100000000000000000000.0) -> "100000000000000000000"
// ToShortest(1111111111111111111111.0) -> "1.1111111111111111e+21"
//
// When converting to precision mode the converter may add
// max_leading_padding_zeroes before returning the number in exponential
// format.
// Example with max_leading_padding_zeroes_in_precision_mode = 6.
// ToPrecision(0.0000012345, 2) -> "0.0000012"
// ToPrecision(0.00000012345, 2) -> "1.2e-7"
// Similarly the converter may add up to
// max_trailing_padding_zeroes_in_precision_mode in precision mode to avoid
// returning an exponential representation. A zero added by the
// EMIT_TRAILING_ZERO_AFTER_POINT flag is counted for this limit.
// Examples for max_trailing_padding_zeroes_in_precision_mode = 1:
// ToPrecision(230.0, 2) -> "230"
// ToPrecision(230.0, 2) -> "230." with EMIT_TRAILING_DECIMAL_POINT.
// ToPrecision(230.0, 2) -> "2.3e2" with EMIT_TRAILING_ZERO_AFTER_POINT.
DoubleToStringConverter(int flags,
const char* infinity_symbol,
const char* nan_symbol,
char exponent_character,
int decimal_in_shortest_low,
int decimal_in_shortest_high,
int max_leading_padding_zeroes_in_precision_mode,
int max_trailing_padding_zeroes_in_precision_mode)
: flags_(flags),
infinity_symbol_(infinity_symbol),
nan_symbol_(nan_symbol),
exponent_character_(exponent_character),
decimal_in_shortest_low_(decimal_in_shortest_low),
decimal_in_shortest_high_(decimal_in_shortest_high),
max_leading_padding_zeroes_in_precision_mode_(
max_leading_padding_zeroes_in_precision_mode),
max_trailing_padding_zeroes_in_precision_mode_(
max_trailing_padding_zeroes_in_precision_mode) {
// When 'trailing zero after the point' is set, then 'trailing point'
// must be set too.
ASSERT(((flags & EMIT_TRAILING_DECIMAL_POINT) != 0) ||
!((flags & EMIT_TRAILING_ZERO_AFTER_POINT) != 0));
}
// Returns a converter following the EcmaScript specification.
static const DoubleToStringConverter& EcmaScriptConverter();
// Computes the shortest string of digits that correctly represent the input
// number. Depending on decimal_in_shortest_low and decimal_in_shortest_high
// (see constructor) it then either returns a decimal representation, or an
// exponential representation.
// Example with decimal_in_shortest_low = -6,
// decimal_in_shortest_high = 21,
// EMIT_POSITIVE_EXPONENT_SIGN activated, and
// EMIT_TRAILING_DECIMAL_POINT deactivated:
// ToShortest(0.000001) -> "0.000001"
// ToShortest(0.0000001) -> "1e-7"
// ToShortest(111111111111111111111.0) -> "111111111111111110000"
// ToShortest(100000000000000000000.0) -> "100000000000000000000"
// ToShortest(1111111111111111111111.0) -> "1.1111111111111111e+21"
//
// Note: the conversion may round the output if the returned string
// is accurate enough to uniquely identify the input-number.
// For example the most precise representation of the double 9e59 equals
// "899999999999999918767229449717619953810131273674690656206848", but
// the converter will return the shorter (but still correct) "9e59".
//
// Returns true if the conversion succeeds. The conversion always succeeds
// except when the input value is special and no infinity_symbol or
// nan_symbol has been given to the constructor.
bool ToShortest(double value, StringBuilder* result_builder) const {
return ToShortestIeeeNumber(value, result_builder, SHORTEST);
}
// Same as ToShortest, but for single-precision floats.
bool ToShortestSingle(float value, StringBuilder* result_builder) const {
return ToShortestIeeeNumber(value, result_builder, SHORTEST_SINGLE);
}
// Computes a decimal representation with a fixed number of digits after the
// decimal point. The last emitted digit is rounded.
//
// Examples:
// ToFixed(3.12, 1) -> "3.1"
// ToFixed(3.1415, 3) -> "3.142"
// ToFixed(1234.56789, 4) -> "1234.5679"
// ToFixed(1.23, 5) -> "1.23000"
// ToFixed(0.1, 4) -> "0.1000"
// ToFixed(1e30, 2) -> "1000000000000000019884624838656.00"
// ToFixed(0.1, 30) -> "0.100000000000000005551115123126"
// ToFixed(0.1, 17) -> "0.10000000000000001"
//
// If requested_digits equals 0, then the tail of the result depends on
// the EMIT_TRAILING_DECIMAL_POINT and EMIT_TRAILING_ZERO_AFTER_POINT flags.
// Examples, for requested_digits == 0,
// let EMIT_TRAILING_DECIMAL_POINT and EMIT_TRAILING_ZERO_AFTER_POINT be
// - false and false: then 123.45 -> 123
// 0.678 -> 1
// - true and false: then 123.45 -> 123.
// 0.678 -> 1.
// - true and true: then 123.45 -> 123.0
// 0.678 -> 1.0
//
// Returns true if the conversion succeeds. The conversion always succeeds
// except for the following cases:
// - the input value is special and no infinity_symbol or nan_symbol has
// been provided to the constructor,
// - 'value' > 10^kMaxFixedDigitsBeforePoint, or
// - 'requested_digits' > kMaxFixedDigitsAfterPoint.
// The last two conditions imply that the result will never contain more than
// 1 + kMaxFixedDigitsBeforePoint + 1 + kMaxFixedDigitsAfterPoint characters
// (one additional character for the sign, and one for the decimal point).
bool ToFixed(double value,
int requested_digits,
StringBuilder* result_builder) const;
// Computes a representation in exponential format with requested_digits
// after the decimal point. The last emitted digit is rounded.
// If requested_digits equals -1, then the shortest exponential representation
// is computed.
//
// Examples with EMIT_POSITIVE_EXPONENT_SIGN deactivated, and
// exponent_character set to 'e'.
// ToExponential(3.12, 1) -> "3.1e0"
// ToExponential(5.0, 3) -> "5.000e0"
// ToExponential(0.001, 2) -> "1.00e-3"
// ToExponential(3.1415, -1) -> "3.1415e0"
// ToExponential(3.1415, 4) -> "3.1415e0"
// ToExponential(3.1415, 3) -> "3.142e0"
// ToExponential(123456789000000, 3) -> "1.235e14"
// ToExponential(1000000000000000019884624838656.0, -1) -> "1e30"
// ToExponential(1000000000000000019884624838656.0, 32) ->
// "1.00000000000000001988462483865600e30"
// ToExponential(1234, 0) -> "1e3"
//
// Returns true if the conversion succeeds. The conversion always succeeds
// except for the following cases:
// - the input value is special and no infinity_symbol or nan_symbol has
// been provided to the constructor,
// - 'requested_digits' > kMaxExponentialDigits.
// The last condition implies that the result will never contain more than
// kMaxExponentialDigits + 8 characters (the sign, the digit before the
// decimal point, the decimal point, the exponent character, the
// exponent's sign, and at most 3 exponent digits).
bool ToExponential(double value,
int requested_digits,
StringBuilder* result_builder) const;
// Computes 'precision' leading digits of the given 'value' and returns them
// either in exponential or decimal format, depending on
// max_{leading|trailing}_padding_zeroes_in_precision_mode (given to the
// constructor).
// The last computed digit is rounded.
//
// Example with max_leading_padding_zeroes_in_precision_mode = 6.
// ToPrecision(0.0000012345, 2) -> "0.0000012"
// ToPrecision(0.00000012345, 2) -> "1.2e-7"
// Similarly the converter may add up to
// max_trailing_padding_zeroes_in_precision_mode in precision mode to avoid
// returning an exponential representation. A zero added by the
// EMIT_TRAILING_ZERO_AFTER_POINT flag is counted for this limit.
// Examples for max_trailing_padding_zeroes_in_precision_mode = 1:
// ToPrecision(230.0, 2) -> "230"
// ToPrecision(230.0, 2) -> "230." with EMIT_TRAILING_DECIMAL_POINT.
// ToPrecision(230.0, 2) -> "2.3e2" with EMIT_TRAILING_ZERO_AFTER_POINT.
// Examples for max_trailing_padding_zeroes_in_precision_mode = 3, and no
// EMIT_TRAILING_ZERO_AFTER_POINT:
// ToPrecision(123450.0, 6) -> "123450"
// ToPrecision(123450.0, 5) -> "123450"
// ToPrecision(123450.0, 4) -> "123500"
// ToPrecision(123450.0, 3) -> "123000"
// ToPrecision(123450.0, 2) -> "1.2e5"
//
// Returns true if the conversion succeeds. The conversion always succeeds
// except for the following cases:
// - the input value is special and no infinity_symbol or nan_symbol has
// been provided to the constructor,
// - precision < kMinPrecisionDigits
// - precision > kMaxPrecisionDigits
// The last condition implies that the result will never contain more than
// kMaxPrecisionDigits + 7 characters (the sign, the decimal point, the
// exponent character, the exponent's sign, and at most 3 exponent digits).
bool ToPrecision(double value,
int precision,
StringBuilder* result_builder) const;
enum DtoaMode {
// Produce the shortest correct representation.
// For example the output of 0.299999999999999988897 is (the less accurate
// but correct) 0.3.
SHORTEST,
// Same as SHORTEST, but for single-precision floats.
SHORTEST_SINGLE,
// Produce a fixed number of digits after the decimal point.
// For instance fixed(0.1, 4) becomes 0.1000
// If the input number is big, the output will be big.
FIXED,
// Fixed number of digits (independent of the decimal point).
PRECISION
};
// The maximal number of digits that are needed to emit a double in base 10.
// A higher precision can be achieved by using more digits, but the shortest
// accurate representation of any double will never use more digits than
// kBase10MaximalLength.
// Note that DoubleToAscii null-terminates its input. So the given buffer
// should be at least kBase10MaximalLength + 1 characters long.
static const int kBase10MaximalLength = 17;
// Converts the given double 'v' to ascii. 'v' must not be NaN, +Infinity, or
// -Infinity. In SHORTEST_SINGLE-mode this restriction also applies to 'v'
// after it has been casted to a single-precision float. That is, in this
// mode static_cast<float>(v) must not be NaN, +Infinity or -Infinity.
//
// The result should be interpreted as buffer * 10^(point-length).
//
// The output depends on the given mode:
// - SHORTEST: produce the least amount of digits for which the internal
// identity requirement is still satisfied. If the digits are printed
// (together with the correct exponent) then reading this number will give
// 'v' again. The buffer will choose the representation that is closest to
// 'v'. If there are two at the same distance, then the one farther away
// from 0 is chosen (halfway cases - ending with 5 - are rounded up).
// In this mode the 'requested_digits' parameter is ignored.
// - SHORTEST_SINGLE: same as SHORTEST but with single-precision.
// - FIXED: produces digits necessary to print a given number with
// 'requested_digits' digits after the decimal point. The produced digits
// might be too short in which case the caller has to fill the remainder
// with '0's.
// Example: toFixed(0.001, 5) is allowed to return buffer="1", point=-2.
// Halfway cases are rounded towards +/-Infinity (away from 0). The call
// toFixed(0.15, 2) thus returns buffer="2", point=0.
// The returned buffer may contain digits that would be truncated from the
// shortest representation of the input.
// - PRECISION: produces 'requested_digits' where the first digit is not '0'.
// Even though the length of produced digits usually equals
// 'requested_digits', the function is allowed to return fewer digits, in
// which case the caller has to fill the missing digits with '0's.
// Halfway cases are again rounded away from 0.
// DoubleToAscii expects the given buffer to be big enough to hold all
// digits and a terminating null-character. In SHORTEST-mode it expects a
// buffer of at least kBase10MaximalLength + 1. In all other modes the
// requested_digits parameter and the padding-zeroes limit the size of the
// output. Don't forget the decimal point, the exponent character and the
// terminating null-character when computing the maximal output size.
// The given length is only used in debug mode to ensure the buffer is big
// enough.
static void DoubleToAscii(double v,
DtoaMode mode,
int requested_digits,
char* buffer,
int buffer_length,
bool* sign,
int* length,
int* point);
private:
// Implementation for ToShortest and ToShortestSingle.
bool ToShortestIeeeNumber(double value,
StringBuilder* result_builder,
DtoaMode mode) const;
// If the value is a special value (NaN or Infinity) constructs the
// corresponding string using the configured infinity/nan-symbol.
// If either of them is NULL or the value is not special then the
// function returns false.
bool HandleSpecialValues(double value, StringBuilder* result_builder) const;
// Constructs an exponential representation (i.e. 1.234e56).
// The given exponent assumes a decimal point after the first decimal digit.
void CreateExponentialRepresentation(const char* decimal_digits,
int length,
int exponent,
StringBuilder* result_builder) const;
// Creates a decimal representation (i.e. 1234.5678).
void CreateDecimalRepresentation(const char* decimal_digits,
int length,
int decimal_point,
int digits_after_point,
StringBuilder* result_builder) const;
const int flags_;
const char* const infinity_symbol_;
const char* const nan_symbol_;
const char exponent_character_;
const int decimal_in_shortest_low_;
const int decimal_in_shortest_high_;
const int max_leading_padding_zeroes_in_precision_mode_;
const int max_trailing_padding_zeroes_in_precision_mode_;
DISALLOW_IMPLICIT_CONSTRUCTORS(DoubleToStringConverter);
};
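// Illustrative usage sketch (not part of the library sources). It relies only
// on the EcmaScriptConverter() singleton declared above and on StringBuilder
// from utils.h; the expected outputs in the comments follow the examples given
// in the class documentation.
inline void DoubleToStringConverterUsageSketch() {
  char chars[128];
  StringBuilder builder(chars, 128);
  const DoubleToStringConverter& converter =
      DoubleToStringConverter::EcmaScriptConverter();
  converter.ToShortest(0.0000001, &builder);         // appends "1e-7"
  builder.AddCharacter(' ');
  converter.ToFixed(3.1415, 3, &builder);            // appends "3.142"
  builder.AddCharacter(' ');
  converter.ToPrecision(0.0000012345, 2, &builder);  // appends "0.0000012"
  const char* text = builder.Finalize();             // null-terminates 'chars'
  (void)text;
}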
class StringToDoubleConverter {
public:
// Enumeration for allowing octals and ignoring junk when converting
// strings to numbers.
enum Flags {
NO_FLAGS = 0,
ALLOW_HEX = 1,
ALLOW_OCTALS = 2,
ALLOW_TRAILING_JUNK = 4,
ALLOW_LEADING_SPACES = 8,
ALLOW_TRAILING_SPACES = 16,
ALLOW_SPACES_AFTER_SIGN = 32
};
// Flags should be a bit-or combination of the possible Flags-enum.
// - NO_FLAGS: no special flags.
// - ALLOW_HEX: recognizes the prefix "0x". Hex numbers may only be integers.
// Ex: StringToDouble("0x1234") -> 4660.0
// In StringToDouble("0x1234.56") the characters ".56" are trailing
// junk. The result of the call is hence dependent on
// the ALLOW_TRAILING_JUNK flag and/or the junk value.
// With this flag "0x" is a junk-string. Even with ALLOW_TRAILING_JUNK,
// the string will not be parsed as "0" followed by junk.
//
// - ALLOW_OCTALS: recognizes the prefix "0" for octals:
// If a sequence of octal digits starts with '0', then the number is
// read as octal integer. Octal numbers may only be integers.
// Ex: StringToDouble("01234") -> 668.0
// StringToDouble("012349") -> 12349.0 // Not a sequence of octal
// // digits.
// In StringToDouble("01234.56") the characters ".56" are trailing
// junk. The result of the call is hence dependent on
// the ALLOW_TRAILING_JUNK flag and/or the junk value.
// In StringToDouble("01234e56") the characters "e56" are trailing
// junk, too.
// - ALLOW_TRAILING_JUNK: ignore trailing characters that are not part of
// a double literal.
// - ALLOW_LEADING_SPACES: skip over leading spaces.
// - ALLOW_TRAILING_SPACES: ignore trailing spaces.
// - ALLOW_SPACES_AFTER_SIGN: ignore spaces after the sign.
// Ex: StringToDouble("- 123.2") -> -123.2.
// StringToDouble("+ 123.2") -> 123.2
//
// empty_string_value is returned when an empty string is given as input.
// If ALLOW_LEADING_SPACES or ALLOW_TRAILING_SPACES are set, then a string
// containing only spaces is converted to the 'empty_string_value', too.
//
// junk_string_value is returned when
// a) ALLOW_TRAILING_JUNK is not set, and a junk character (a character not
// part of a double-literal) is found.
// b) ALLOW_TRAILING_JUNK is set, but the string does not start with a
// double literal.
//
// infinity_symbol and nan_symbol are strings that are used to detect
// inputs that represent infinity and NaN. They can be null, in which case
// they are ignored.
// The conversion routine first reads any possible signs. Then it compares the
// following character of the input-string with the first character of
// the infinity and nan symbols. If either matches, the function assumes that
// a match has been found, and expects the following input characters to match
// the remaining characters of the special-value symbol.
// This means that the following restrictions apply to special-value symbols:
// - they must not start with signs ('+', or '-'),
// - they must not have the same first character.
// - they must not start with digits.
//
// Examples:
// flags = ALLOW_HEX | ALLOW_TRAILING_JUNK,
// empty_string_value = 0.0,
// junk_string_value = NaN,
// infinity_symbol = "infinity",
// nan_symbol = "nan":
// StringToDouble("0x1234") -> 4660.0.
// StringToDouble("0x1234K") -> 4660.0.
// StringToDouble("") -> 0.0 // empty_string_value.
// StringToDouble(" ") -> NaN // junk_string_value.
// StringToDouble(" 1") -> NaN // junk_string_value.
// StringToDouble("0x") -> NaN // junk_string_value.
// StringToDouble("-123.45") -> -123.45.
// StringToDouble("--123.45") -> NaN // junk_string_value.
// StringToDouble("123e45") -> 123e45.
// StringToDouble("123E45") -> 123e45.
// StringToDouble("123e+45") -> 123e45.
// StringToDouble("123E-45") -> 123e-45.
// StringToDouble("123e") -> 123.0 // trailing junk ignored.
// StringToDouble("123e-") -> 123.0 // trailing junk ignored.
// StringToDouble("+NaN") -> NaN // NaN string literal.
// StringToDouble("-infinity") -> -inf. // infinity literal.
// StringToDouble("Infinity") -> NaN // junk_string_value.
//
// flags = ALLOW_OCTALS | ALLOW_LEADING_SPACES,
// empty_string_value = 0.0,
// junk_string_value = NaN,
// infinity_symbol = NULL,
// nan_symbol = NULL:
// StringToDouble("0x1234") -> NaN // junk_string_value.
// StringToDouble("01234") -> 668.0.
// StringToDouble("") -> 0.0 // empty_string_value.
// StringToDouble(" ") -> 0.0 // empty_string_value.
// StringToDouble(" 1") -> 1.0
// StringToDouble("0x") -> NaN // junk_string_value.
// StringToDouble("0123e45") -> NaN // junk_string_value.
// StringToDouble("01239E45") -> 1239e45.
// StringToDouble("-infinity") -> NaN // junk_string_value.
// StringToDouble("NaN") -> NaN // junk_string_value.
StringToDoubleConverter(int flags,
double empty_string_value,
double junk_string_value,
const char* infinity_symbol,
const char* nan_symbol)
: flags_(flags),
empty_string_value_(empty_string_value),
junk_string_value_(junk_string_value),
infinity_symbol_(infinity_symbol),
nan_symbol_(nan_symbol) {
}
// Performs the conversion.
// The output parameter 'processed_characters_count' is set to the number
// of characters that have been processed to read the number.
// Spaces that are processed with ALLOW_{LEADING|TRAILING}_SPACES are included
// in the 'processed_characters_count'. Trailing junk is never included.
double StringToDouble(const char* buffer,
int length,
int* processed_characters_count) const {
return StringToIeee(buffer, length, processed_characters_count, true);
}
// Same as StringToDouble but reads a float.
// Note that this is not equivalent to static_cast<float>(StringToDouble(...))
// due to potential double-rounding.
float StringToFloat(const char* buffer,
int length,
int* processed_characters_count) const {
return static_cast<float>(StringToIeee(buffer, length,
processed_characters_count, false));
}
private:
const int flags_;
const double empty_string_value_;
const double junk_string_value_;
const char* const infinity_symbol_;
const char* const nan_symbol_;
double StringToIeee(const char* buffer,
int length,
int* processed_characters_count,
bool read_as_double) const;
DISALLOW_IMPLICIT_CONSTRUCTORS(StringToDoubleConverter);
};
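// Illustrative usage sketch (not part of the library sources), mirroring the
// first example block in the comments above. The NaN sentinel below is an
// assumption of this sketch (any quiet NaN works as junk_string_value).
inline double StringToDoubleConverterUsageSketch() {
  const double kJunkValue = 0.0 / 0.0;  // quiet NaN, evaluated at run time
  StringToDoubleConverter converter(
      StringToDoubleConverter::ALLOW_HEX |
          StringToDoubleConverter::ALLOW_TRAILING_JUNK,
      0.0,         // empty_string_value
      kJunkValue,  // junk_string_value
      "infinity",
      "nan");
  int processed = 0;
  // Per the examples above this returns 4660.0; the trailing "K" is junk and
  // is not counted in 'processed'.
  return converter.StringToDouble("0x1234K", 7, &processed);
}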
} // namespace double_conversion
#endif // DOUBLE_CONVERSION_DOUBLE_CONVERSION_H_

View File

@ -0,0 +1,665 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "fast-dtoa.h"
#include "cached-powers.h"
#include "diy-fp.h"
#include "ieee.h"
namespace double_conversion {
// The minimal and maximal target exponent define the range of w's binary
// exponent, where 'w' is the result of multiplying the input by a cached power
// of ten.
//
// A different range might be chosen on a different platform, to optimize digit
// generation, but a smaller range requires more powers of ten to be cached.
static const int kMinimalTargetExponent = -60;
static const int kMaximalTargetExponent = -32;
// Adjusts the last digit of the generated number, and screens out generated
// solutions that may be inaccurate. A solution may be inaccurate if it is
// outside the safe interval, or if we cannot prove that it is closer to the
// input than a neighboring representation of the same length.
//
// Input: * buffer containing the digits of too_high / 10^kappa
// * the buffer's length
// * distance_too_high_w == (too_high - w).f() * unit
// * unsafe_interval == (too_high - too_low).f() * unit
// * rest = (too_high - buffer * 10^kappa).f() * unit
// * ten_kappa = 10^kappa * unit
// * unit = the common multiplier
// Output: returns true if the buffer is guaranteed to contain the closest
// representable number to the input.
// Modifies the generated digits in the buffer to approach (round towards) w.
static bool RoundWeed(Vector<char> buffer,
int length,
uint64_t distance_too_high_w,
uint64_t unsafe_interval,
uint64_t rest,
uint64_t ten_kappa,
uint64_t unit) {
uint64_t small_distance = distance_too_high_w - unit;
uint64_t big_distance = distance_too_high_w + unit;
// Let w_low = too_high - big_distance, and
// w_high = too_high - small_distance.
// Note: w_low < w < w_high
//
// The real w (* unit) must lie somewhere inside the interval
// ]w_low; w_high[ (often written as "(w_low; w_high)")
// Basically the buffer currently contains a number in the unsafe interval
// ]too_low; too_high[ with too_low < w < too_high
//
// too_high - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// ^v 1 unit ^ ^ ^ ^
// boundary_high --------------------- . . . .
// ^v 1 unit . . . .
// - - - - - - - - - - - - - - - - - - - + - - + - - - - - - . .
// . . ^ . .
// . big_distance . . .
// . . . . rest
// small_distance . . . .
// v . . . .
// w_high - - - - - - - - - - - - - - - - - - . . . .
// ^v 1 unit . . . .
// w ---------------------------------------- . . . .
// ^v 1 unit v . . .
// w_low - - - - - - - - - - - - - - - - - - - - - . . .
// . . v
// buffer --------------------------------------------------+-------+--------
// . .
// safe_interval .
// v .
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - .
// ^v 1 unit .
// boundary_low ------------------------- unsafe_interval
// ^v 1 unit v
// too_low - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
//
//
// Note that the value of buffer could lie anywhere inside the range too_low
// to too_high.
//
// boundary_low, boundary_high and w are approximations of the real boundaries
// and v (the input number). They are guaranteed to be precise up to one unit.
// In fact the error is guaranteed to be strictly less than one unit.
//
// Anything that lies outside the unsafe interval is guaranteed not to round
// to v when read again.
// Anything that lies inside the safe interval is guaranteed to round to v
// when read again.
// If the number inside the buffer lies inside the unsafe interval but not
// inside the safe interval then we simply do not know and bail out (returning
// false).
//
// Similarly we have to take into account the imprecision of 'w' when finding
// the closest representation of 'w'. If we have two potential
// representations, and one is closer to both w_low and w_high, then we know
// it is closer to the actual value v.
//
// By generating the digits of too_high we got the largest (closest to
// too_high) buffer that is still in the unsafe interval. In the case where
// w_high < buffer < too_high we try to decrement the buffer.
// This way the buffer approaches (rounds towards) w.
// There are 3 conditions that stop the decrementation process:
// 1) the buffer is already below w_high
// 2) decrementing the buffer would make it leave the unsafe interval
// 3) decrementing the buffer would yield a number below w_high and farther
// away than the current number. In other words:
// (buffer{-1} < w_high) && w_high - buffer{-1} > buffer - w_high
// Instead of using the buffer directly we use its distance to too_high.
// Conceptually rest ~= too_high - buffer
// We need to do the following tests in this order to avoid over- and
// underflows.
ASSERT(rest <= unsafe_interval);
while (rest < small_distance && // Negated condition 1
unsafe_interval - rest >= ten_kappa && // Negated condition 2
(rest + ten_kappa < small_distance || // buffer{-1} > w_high
small_distance - rest >= rest + ten_kappa - small_distance)) {
buffer[length - 1]--;
rest += ten_kappa;
}
// We have approached w+ as much as possible. We now test if approaching w-
// would require changing the buffer. If yes, then we have two possible
// representations close to w, but we cannot decide which one is closer.
if (rest < big_distance &&
unsafe_interval - rest >= ten_kappa &&
(rest + ten_kappa < big_distance ||
big_distance - rest > rest + ten_kappa - big_distance)) {
return false;
}
// Weeding test.
// The safe interval is [too_low + 2 ulp; too_high - 2 ulp]
// Since too_low = too_high - unsafe_interval this is equivalent to
// [too_high - unsafe_interval + 4 ulp; too_high - 2 ulp]
// Conceptually we have: rest ~= too_high - buffer
return (2 * unit <= rest) && (rest <= unsafe_interval - 4 * unit);
}
// Rounds the buffer upwards if the result is closer to v by possibly adding
// 1 to the buffer. If the precision of the calculation is not sufficient to
// round correctly, return false.
// The rounding might shift the whole buffer in which case the kappa is
// adjusted. For example "99", kappa = 3 might become "10", kappa = 4.
//
// If 2*rest > ten_kappa then the buffer needs to be rounded up.
// rest can have an error of +/- 1 unit. This function accounts for the
// imprecision and returns false, if the rounding direction cannot be
// unambiguously determined.
//
// Precondition: rest < ten_kappa.
static bool RoundWeedCounted(Vector<char> buffer,
int length,
uint64_t rest,
uint64_t ten_kappa,
uint64_t unit,
int* kappa) {
ASSERT(rest < ten_kappa);
// The following tests are done in a specific order to avoid overflows. They
// will work correctly with any uint64 values of rest < ten_kappa and unit.
//
// If the unit is too big, then we don't know which way to round. For example
// a unit of 50 means that the real number lies within rest +/- 50. If
// 10^kappa == 40 then there is no way to tell which way to round.
if (unit >= ten_kappa) return false;
// Even if unit is just half the size of 10^kappa we are already completely
// lost. (And after the previous test we know that the expression will not
// over/underflow.)
if (ten_kappa - unit <= unit) return false;
// If 2 * (rest + unit) <= 10^kappa we can safely round down.
if ((ten_kappa - rest > rest) && (ten_kappa - 2 * rest >= 2 * unit)) {
return true;
}
// If 2 * (rest - unit) >= 10^kappa, then we can safely round up.
if ((rest > unit) && (ten_kappa - (rest - unit) <= (rest - unit))) {
// Increment the last digit recursively until we find a non '9' digit.
buffer[length - 1]++;
for (int i = length - 1; i > 0; --i) {
if (buffer[i] != '0' + 10) break;
buffer[i] = '0';
buffer[i - 1]++;
}
// If the first digit is now '0'+ 10 we had a buffer with all '9's. With the
// exception of the first digit all digits are now '0'. Simply switch the
// first digit to '1' and adjust the kappa. Example: "99" becomes "10" and
// the power (the kappa) is increased.
if (buffer[0] == '0' + 10) {
buffer[0] = '1';
(*kappa) += 1;
}
return true;
}
return false;
}
// Returns the biggest power of ten that is less than or equal to the given
// number. We furthermore receive the maximum number of bits 'number' has.
//
// Returns power == 10^(exponent_plus_one-1) such that
// power <= number < power * 10.
// If number_bits == 0 then 0^(0-1) is returned.
// The number of bits must be <= 32.
// Precondition: number < (1 << (number_bits + 1)).
// Inspired by the method for finding an integer log base 10 from here:
// http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog10
static unsigned int const kSmallPowersOfTen[] =
{0, 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000,
1000000000};
static void BiggestPowerTen(uint32_t number,
int number_bits,
uint32_t* power,
int* exponent_plus_one) {
ASSERT(number < (1u << (number_bits + 1)));
// 1233/4096 is approximately 1/lg(10).
int exponent_plus_one_guess = ((number_bits + 1) * 1233 >> 12);
// We increment to skip over the first entry in the kSmallPowersOfTen table.
// Note: kSmallPowersOfTen[i] == 10^(i-1).
exponent_plus_one_guess++;
// We don't have any guarantees that 2^number_bits <= number.
if (number < kSmallPowersOfTen[exponent_plus_one_guess]) {
exponent_plus_one_guess--;
}
*power = kSmallPowersOfTen[exponent_plus_one_guess];
*exponent_plus_one = exponent_plus_one_guess;
}
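// Worked example (illustrative, not part of the library sources): for
// number == 1234 and number_bits == 10 (1234 < 1 << 11) the guess is
// ((10 + 1) * 1233 >> 12) + 1 == 4, and since kSmallPowersOfTen[4] == 1000
// is not larger than 1234 no correction is needed, so BiggestPowerTen yields
// power == 1000 and exponent_plus_one == 4 (1000 <= 1234 < 10000).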
// Generates the digits of input number w.
// w is a floating-point number (DiyFp), consisting of a significand and an
// exponent. Its exponent is bounded by kMinimalTargetExponent and
// kMaximalTargetExponent.
// Hence -60 <= w.e() <= -32.
//
// Returns false if it fails, in which case the generated digits in the buffer
// should not be used.
// Preconditions:
// * low, w and high are correct up to 1 ulp (unit in the last place). That
// is, their error must be less than a unit of their last digits.
// * low.e() == w.e() == high.e()
// * low < w < high, and taking into account their error: low~ <= high~
// * kMinimalTargetExponent <= w.e() <= kMaximalTargetExponent
// Postconditions: returns false if procedure fails.
// otherwise:
// * buffer is not null-terminated, but len contains the number of digits.
// * buffer contains the shortest possible decimal digit-sequence
// such that LOW < buffer * 10^kappa < HIGH, where LOW and HIGH are the
// correct values of low and high (without their error).
// * if more than one decimal representation gives the minimal number of
// decimal digits then the one closest to W (where W is the correct value
// of w) is chosen.
// Remark: this procedure takes into account the imprecision of its input
// numbers. If the precision is not enough to guarantee all the postconditions
// then false is returned. This usually happens rarely (~0.5%).
//
// Say, for the sake of example, that
// w.e() == -48, and w.f() == 0x1234567890abcdef
// w's value can be computed by w.f() * 2^w.e()
// We can obtain w's integral digits by simply shifting w.f() by -w.e().
// -> w's integral part is 0x1234
// w's fractional part is therefore 0x567890abcdef.
// Printing w's integral part is easy (simply print 0x1234 in decimal).
// In order to print its fraction we repeatedly multiply the fraction by 10 and
// get each digit. For example, the first digit after the point would be computed by
// (0x567890abcdef * 10) >> 48. -> 3
// The whole thing becomes slightly more complicated because we want to stop
// once we have enough digits. That is, once the digits inside the buffer
// represent 'w' we can stop. Everything inside the interval low - high
// represents w. However we have to pay attention to low, high and w's
// imprecision.
static bool DigitGen(DiyFp low,
DiyFp w,
DiyFp high,
Vector<char> buffer,
int* length,
int* kappa) {
ASSERT(low.e() == w.e() && w.e() == high.e());
ASSERT(low.f() + 1 <= high.f() - 1);
ASSERT(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
// low, w and high are imprecise, but by less than one ulp (unit in the last
// place).
// If we remove (resp. add) 1 ulp from low (resp. high) we are certain that
// the new numbers are outside of the interval we want the final
// representation to lie in.
// Inversely adding (resp. removing) 1 ulp from low (resp. high) would yield
// numbers that are certain to lie in the interval. We will use this fact
// later on.
// We will now start by generating the digits within the uncertain
// interval. Later we will weed out representations that lie outside the safe
// interval and thus _might_ lie outside the correct interval.
uint64_t unit = 1;
DiyFp too_low = DiyFp(low.f() - unit, low.e());
DiyFp too_high = DiyFp(high.f() + unit, high.e());
// too_low and too_high are guaranteed to lie outside the interval we want the
// generated number in.
DiyFp unsafe_interval = DiyFp::Minus(too_high, too_low);
// We now cut the input number into two parts: the integral digits and the
// fractionals. We will not write any decimal separator though, but adapt
// kappa instead.
// Reminder: we are currently computing the digits (stored inside the buffer)
// such that: too_low < buffer * 10^kappa < too_high
// We use too_high for the digit_generation and stop as soon as possible.
// If we stop early we effectively round down.
DiyFp one = DiyFp(static_cast<uint64_t>(1) << -w.e(), w.e());
// Division by one is a shift.
uint32_t integrals = static_cast<uint32_t>(too_high.f() >> -one.e());
// Modulo by one is an and.
uint64_t fractionals = too_high.f() & (one.f() - 1);
uint32_t divisor;
int divisor_exponent_plus_one;
BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()),
&divisor, &divisor_exponent_plus_one);
*kappa = divisor_exponent_plus_one;
*length = 0;
// Loop invariant: buffer = too_high / 10^kappa (integer division)
// The invariant holds for the first iteration: kappa has been initialized
// with the divisor exponent + 1. And the divisor is the biggest power of ten
// that is smaller than integrals.
while (*kappa > 0) {
int digit = integrals / divisor;
ASSERT(digit <= 9);
buffer[*length] = static_cast<char>('0' + digit);
(*length)++;
integrals %= divisor;
(*kappa)--;
// Note that kappa now equals the exponent of the divisor and that the
// invariant thus holds again.
uint64_t rest =
(static_cast<uint64_t>(integrals) << -one.e()) + fractionals;
// Invariant: too_high = buffer * 10^kappa + DiyFp(rest, one.e())
// Reminder: unsafe_interval.e() == one.e()
if (rest < unsafe_interval.f()) {
// Rounding down (by not emitting the remaining digits) yields a number
// that lies within the unsafe interval.
return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f(),
unsafe_interval.f(), rest,
static_cast<uint64_t>(divisor) << -one.e(), unit);
}
divisor /= 10;
}
// The integrals have been generated. We are at the point of the decimal
// separator. In the following loop we simply multiply the remaining digits by
// 10 and divide by one. We just need to pay attention to multiply associated
// data (like the interval or 'unit'), too.
// Note that the multiplication by 10 does not overflow, because w.e >= -60
// and thus one.e >= -60.
ASSERT(one.e() >= -60);
ASSERT(fractionals < one.f());
ASSERT(UINT64_2PART_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f());
for (;;) {
fractionals *= 10;
unit *= 10;
unsafe_interval.set_f(unsafe_interval.f() * 10);
// Integer division by one.
int digit = static_cast<int>(fractionals >> -one.e());
ASSERT(digit <= 9);
buffer[*length] = static_cast<char>('0' + digit);
(*length)++;
fractionals &= one.f() - 1; // Modulo by one.
(*kappa)--;
if (fractionals < unsafe_interval.f()) {
return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f() * unit,
unsafe_interval.f(), fractionals, one.f(), unit);
}
}
}
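// Illustrative sketch (not part of the library sources) of the shift-and-mask
// split used above: with an exponent of -60, "one" equals 2^60 * 2^-60, so
// dividing by one is a right shift and taking the remainder is a bit-wise and.
// The value of w_f below is an arbitrary example.
inline void IntegralFractionalSplitSketch() {
  const int e = -60;
  uint64_t one_f = static_cast<uint64_t>(1) << -e;        // one.f() == 2^60
  uint64_t w_f = (static_cast<uint64_t>(5) << -e) + 123;  // 5 + 123 * 2^-60
  uint32_t integrals = static_cast<uint32_t>(w_f >> -e);  // == 5
  uint64_t fractionals = w_f & (one_f - 1);               // == 123
  ASSERT(integrals == 5 && fractionals == 123);
  (void)integrals;
  (void)fractionals;
}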
// Generates (at most) requested_digits digits of input number w.
// w is a floating-point number (DiyFp), consisting of a significand and an
// exponent. Its exponent is bounded by kMinimalTargetExponent and
// kMaximalTargetExponent.
// Hence -60 <= w.e() <= -32.
//
// Returns false if it fails, in which case the generated digits in the buffer
// should not be used.
// Preconditions:
// * w is correct up to 1 ulp (unit in the last place). That
// is, its error must be strictly less than a unit of its last digit.
// * kMinimalTargetExponent <= w.e() <= kMaximalTargetExponent
//
// Postconditions: returns false if procedure fails.
// otherwise:
// * buffer is not null-terminated, but length contains the number of
// digits.
// * the representation in buffer is the most precise representation of
// requested_digits digits.
// * buffer contains at most requested_digits digits of w. If there are less
// than requested_digits digits then some trailing '0's have been removed.
// * kappa is such that
// w = buffer * 10^kappa + eps with |eps| < 10^kappa / 2.
//
// Remark: This procedure takes into account the imprecision of its input
// numbers. If the precision is not enough to guarantee all the postconditions
// then false is returned. This usually happens rarely, but the failure-rate
// increases with higher requested_digits.
static bool DigitGenCounted(DiyFp w,
int requested_digits,
Vector<char> buffer,
int* length,
int* kappa) {
ASSERT(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
ASSERT(kMinimalTargetExponent >= -60);
ASSERT(kMaximalTargetExponent <= -32);
// w is assumed to have an error less than 1 unit. Whenever w is scaled we
// also scale its error.
uint64_t w_error = 1;
// We cut the input number into two parts: the integral digits and the
// fractional digits. We don't emit any decimal separator, but adapt kappa
// instead. Example: instead of writing "1.2" we put "12" into the buffer and
// increase kappa by 1.
DiyFp one = DiyFp(static_cast<uint64_t>(1) << -w.e(), w.e());
// Division by one is a shift.
uint32_t integrals = static_cast<uint32_t>(w.f() >> -one.e());
// Modulo by one is an and.
uint64_t fractionals = w.f() & (one.f() - 1);
uint32_t divisor;
int divisor_exponent_plus_one;
BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()),
&divisor, &divisor_exponent_plus_one);
*kappa = divisor_exponent_plus_one;
*length = 0;
// Loop invariant: buffer = w / 10^kappa (integer division)
// The invariant holds for the first iteration: kappa has been initialized
// with the divisor exponent + 1. And the divisor is the biggest power of ten
// that is smaller than 'integrals'.
while (*kappa > 0) {
int digit = integrals / divisor;
ASSERT(digit <= 9);
buffer[*length] = static_cast<char>('0' + digit);
(*length)++;
requested_digits--;
integrals %= divisor;
(*kappa)--;
// Note that kappa now equals the exponent of the divisor and that the
// invariant thus holds again.
if (requested_digits == 0) break;
divisor /= 10;
}
if (requested_digits == 0) {
uint64_t rest =
(static_cast<uint64_t>(integrals) << -one.e()) + fractionals;
return RoundWeedCounted(buffer, *length, rest,
static_cast<uint64_t>(divisor) << -one.e(), w_error,
kappa);
}
// The integrals have been generated. We are at the point of the decimal
// separator. In the following loop we simply multiply the remaining digits by
// 10 and divide by one. We just need to pay attention to multiply associated
// data (the 'unit'), too.
// Note that the multiplication by 10 does not overflow, because w.e >= -60
// and thus one.e >= -60.
ASSERT(one.e() >= -60);
ASSERT(fractionals < one.f());
ASSERT(UINT64_2PART_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f());
while (requested_digits > 0 && fractionals > w_error) {
fractionals *= 10;
w_error *= 10;
// Integer division by one.
int digit = static_cast<int>(fractionals >> -one.e());
ASSERT(digit <= 9);
buffer[*length] = static_cast<char>('0' + digit);
(*length)++;
requested_digits--;
fractionals &= one.f() - 1; // Modulo by one.
(*kappa)--;
}
if (requested_digits != 0) return false;
return RoundWeedCounted(buffer, *length, fractionals, one.f(), w_error,
kappa);
}
// Provides a decimal representation of v.
// Returns true if it succeeds, otherwise the result cannot be trusted.
// There will be *length digits inside the buffer (not null-terminated).
// If the function returns true then
// v == (double) (buffer * 10^decimal_exponent).
// The digits in the buffer are the shortest representation possible: no
// 0.09999999999999999 instead of 0.1. The shorter representation will be
// chosen even if the longer one would be closer to v.
// The last digit will be closest to the actual v. That is, even if several
// digits might correctly yield 'v' when read again, the closest will be
// computed.
static bool Grisu3(double v,
FastDtoaMode mode,
Vector<char> buffer,
int* length,
int* decimal_exponent) {
DiyFp w = Double(v).AsNormalizedDiyFp();
// boundary_minus and boundary_plus are the boundaries between v and its
// closest floating-point neighbors. Any number strictly between
// boundary_minus and boundary_plus will round to v when converted to a double.
// Grisu3 will never output representations that lie exactly on a boundary.
DiyFp boundary_minus, boundary_plus;
if (mode == FAST_DTOA_SHORTEST) {
Double(v).NormalizedBoundaries(&boundary_minus, &boundary_plus);
} else {
ASSERT(mode == FAST_DTOA_SHORTEST_SINGLE);
float single_v = static_cast<float>(v);
Single(single_v).NormalizedBoundaries(&boundary_minus, &boundary_plus);
}
ASSERT(boundary_plus.e() == w.e());
DiyFp ten_mk; // Cached power of ten: 10^-k
int mk; // -k
int ten_mk_minimal_binary_exponent =
kMinimalTargetExponent - (w.e() + DiyFp::kSignificandSize);
int ten_mk_maximal_binary_exponent =
kMaximalTargetExponent - (w.e() + DiyFp::kSignificandSize);
PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
ten_mk_minimal_binary_exponent,
ten_mk_maximal_binary_exponent,
&ten_mk, &mk);
ASSERT((kMinimalTargetExponent <= w.e() + ten_mk.e() +
DiyFp::kSignificandSize) &&
(kMaximalTargetExponent >= w.e() + ten_mk.e() +
DiyFp::kSignificandSize));
// Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a
// 64 bit significand and ten_mk is thus only precise up to 64 bits.
// The DiyFp::Times procedure rounds its result, and ten_mk is approximated
// too. The variable scaled_w (as well as scaled_boundary_minus/plus) are now
// off by a small amount.
// In fact: scaled_w - w*10^k < 1ulp (unit in the last place) of scaled_w.
// In other words: let f = scaled_w.f() and e = scaled_w.e(), then
// (f-1) * 2^e < w*10^k < (f+1) * 2^e
DiyFp scaled_w = DiyFp::Times(w, ten_mk);
ASSERT(scaled_w.e() ==
boundary_plus.e() + ten_mk.e() + DiyFp::kSignificandSize);
// In theory it would be possible to avoid some recomputations by computing
// the difference between w and boundary_minus/plus (a power of 2) and to
// compute scaled_boundary_minus/plus by subtracting/adding from
// scaled_w. However the code becomes much less readable and the speed
// enhancements are not terrific.
DiyFp scaled_boundary_minus = DiyFp::Times(boundary_minus, ten_mk);
DiyFp scaled_boundary_plus = DiyFp::Times(boundary_plus, ten_mk);
// DigitGen will generate the digits of scaled_w. Therefore we have
// v == (double) (scaled_w * 10^-mk).
// Set decimal_exponent == -mk and pass it to DigitGen. If scaled_w is not an
// integer then it will be updated. For instance if scaled_w == 1.23 then
// the buffer will be filled with "123" and the decimal_exponent will be
// decreased by 2.
int kappa;
bool result = DigitGen(scaled_boundary_minus, scaled_w, scaled_boundary_plus,
buffer, length, &kappa);
*decimal_exponent = -mk + kappa;
return result;
}
// The "counted" version of grisu3 (see above) only generates requested_digits
// number of digits. This version does not generate the shortest representation,
// and with enough requested digits 0.1 will at some point print as 0.9999999...
// Grisu3 is too imprecise for real halfway cases (1.5 will not work) and
// therefore the rounding strategy for halfway cases is irrelevant.
static bool Grisu3Counted(double v,
int requested_digits,
Vector<char> buffer,
int* length,
int* decimal_exponent) {
DiyFp w = Double(v).AsNormalizedDiyFp();
DiyFp ten_mk; // Cached power of ten: 10^-k
int mk; // -k
int ten_mk_minimal_binary_exponent =
kMinimalTargetExponent - (w.e() + DiyFp::kSignificandSize);
int ten_mk_maximal_binary_exponent =
kMaximalTargetExponent - (w.e() + DiyFp::kSignificandSize);
PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
ten_mk_minimal_binary_exponent,
ten_mk_maximal_binary_exponent,
&ten_mk, &mk);
ASSERT((kMinimalTargetExponent <= w.e() + ten_mk.e() +
DiyFp::kSignificandSize) &&
(kMaximalTargetExponent >= w.e() + ten_mk.e() +
DiyFp::kSignificandSize));
// Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a
// 64 bit significand and ten_mk is thus only precise up to 64 bits.
// The DiyFp::Times procedure rounds its result, and ten_mk is approximated
// too. The variable scaled_w (as well as scaled_boundary_minus/plus) are now
// off by a small amount.
// In fact: scaled_w - w*10^k < 1ulp (unit in the last place) of scaled_w.
// In other words: let f = scaled_w.f() and e = scaled_w.e(), then
// (f-1) * 2^e < w*10^k < (f+1) * 2^e
DiyFp scaled_w = DiyFp::Times(w, ten_mk);
// We now have (double) (scaled_w * 10^-mk).
// DigitGenCounted will generate the first requested_digits digits of scaled_w and
// return together with a kappa such that scaled_w ~= buffer * 10^kappa. (It
// will not always be exactly the same since DigitGenCounted only produces a
// limited number of digits.)
int kappa;
bool result = DigitGenCounted(scaled_w, requested_digits,
buffer, length, &kappa);
*decimal_exponent = -mk + kappa;
return result;
}
bool FastDtoa(double v,
FastDtoaMode mode,
int requested_digits,
Vector<char> buffer,
int* length,
int* decimal_point) {
ASSERT(v > 0);
ASSERT(!Double(v).IsSpecial());
bool result = false;
int decimal_exponent = 0;
switch (mode) {
case FAST_DTOA_SHORTEST:
case FAST_DTOA_SHORTEST_SINGLE:
result = Grisu3(v, mode, buffer, length, &decimal_exponent);
break;
case FAST_DTOA_PRECISION:
result = Grisu3Counted(v, requested_digits,
buffer, length, &decimal_exponent);
break;
default:
UNREACHABLE();
}
if (result) {
*decimal_point = *length + decimal_exponent;
buffer[*length] = '\0';
}
return result;
}
} // namespace double_conversion

View File

@ -0,0 +1,88 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef DOUBLE_CONVERSION_FAST_DTOA_H_
#define DOUBLE_CONVERSION_FAST_DTOA_H_
#include "utils.h"
namespace double_conversion {
enum FastDtoaMode {
// Computes the shortest representation of the given input. The returned
// result will be the most accurate number of this length. Longer
// representations might be more accurate.
FAST_DTOA_SHORTEST,
// Same as FAST_DTOA_SHORTEST but for single-precision floats.
FAST_DTOA_SHORTEST_SINGLE,
// Computes a representation where the precision (number of digits) is
// given as input. The precision is independent of the decimal point.
FAST_DTOA_PRECISION
};
// FastDtoa will produce at most kFastDtoaMaximalLength digits. This does not
// include the terminating '\0' character.
static const int kFastDtoaMaximalLength = 17;
// Same for single-precision numbers.
static const int kFastDtoaMaximalSingleLength = 9;
// Provides a decimal representation of v.
// The result should be interpreted as buffer * 10^(point - length).
//
// Precondition:
// * v must be a strictly positive finite double.
//
// Returns true if it succeeds, otherwise the result cannot be trusted.
// There will be *length digits inside the buffer followed by a null terminator.
// If the function returns true and mode equals
// - FAST_DTOA_SHORTEST, then
// the parameter requested_digits is ignored.
// The result satisfies
// v == (double) (buffer * 10^(point - length)).
// The digits in the buffer are the shortest representation possible. E.g.
// if 0.099999999999 and 0.1 represent the same double then "1" is returned
// with point = 0.
// The last digit will be closest to the actual v. That is, even if several
// digits might correctly yield 'v' when read again, the buffer will contain
// the one closest to v.
// - FAST_DTOA_PRECISION, then
// the buffer contains requested_digits digits.
// the difference v - (buffer * 10^(point-length)) is closest to zero for
// all possible representations of requested_digits digits.
// If there are two values that are equally close, then FastDtoa returns
// false.
// For both modes the buffer must be large enough to hold the result.
bool FastDtoa(double d,
FastDtoaMode mode,
int requested_digits,
Vector<char> buffer,
int* length,
int* decimal_point);
} // namespace double_conversion
#endif // DOUBLE_CONVERSION_FAST_DTOA_H_
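// A minimal, self-contained usage sketch (illustrative only, not part of the
// library sources). The include path is an assumption of this sketch; the
// expected values follow the "buffer * 10^(point - length)" interpretation
// documented above.
#include <cstdio>
#include "fast-dtoa.h"

int main() {
  using namespace double_conversion;
  char digits[kFastDtoaMaximalLength + 1];
  int length = 0;
  int point = 0;
  bool ok = FastDtoa(0.1, FAST_DTOA_SHORTEST, 0,
                     Vector<char>(digits, kFastDtoaMaximalLength + 1),
                     &length, &point);
  // Expected: ok == true, digits == "1", length == 1, point == 0,
  // i.e. 0.1 == 1 * 10^(0 - 1).
  printf("%d %s %d %d\n", ok, digits, length, point);
  return 0;
}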

View File

@ -0,0 +1,404 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <math.h>
#include "fixed-dtoa.h"
#include "ieee.h"
namespace double_conversion {
// Represents a 128bit type. This class should be replaced by a native type on
// platforms that support 128bit integers.
class UInt128 {
public:
UInt128() : high_bits_(0), low_bits_(0) { }
UInt128(uint64_t high, uint64_t low) : high_bits_(high), low_bits_(low) { }
void Multiply(uint32_t multiplicand) {
uint64_t accumulator;
accumulator = (low_bits_ & kMask32) * multiplicand;
uint32_t part = static_cast<uint32_t>(accumulator & kMask32);
accumulator >>= 32;
accumulator = accumulator + (low_bits_ >> 32) * multiplicand;
low_bits_ = (accumulator << 32) + part;
accumulator >>= 32;
accumulator = accumulator + (high_bits_ & kMask32) * multiplicand;
part = static_cast<uint32_t>(accumulator & kMask32);
accumulator >>= 32;
accumulator = accumulator + (high_bits_ >> 32) * multiplicand;
high_bits_ = (accumulator << 32) + part;
ASSERT((accumulator >> 32) == 0);
}
void Shift(int shift_amount) {
ASSERT(-64 <= shift_amount && shift_amount <= 64);
if (shift_amount == 0) {
return;
} else if (shift_amount == -64) {
high_bits_ = low_bits_;
low_bits_ = 0;
} else if (shift_amount == 64) {
low_bits_ = high_bits_;
high_bits_ = 0;
} else if (shift_amount <= 0) {
high_bits_ <<= -shift_amount;
high_bits_ += low_bits_ >> (64 + shift_amount);
low_bits_ <<= -shift_amount;
} else {
low_bits_ >>= shift_amount;
low_bits_ += high_bits_ << (64 - shift_amount);
high_bits_ >>= shift_amount;
}
}
// Modifies *this to *this MOD (2^power).
// Returns *this DIV (2^power).
int DivModPowerOf2(int power) {
if (power >= 64) {
int result = static_cast<int>(high_bits_ >> (power - 64));
high_bits_ -= static_cast<uint64_t>(result) << (power - 64);
return result;
} else {
uint64_t part_low = low_bits_ >> power;
uint64_t part_high = high_bits_ << (64 - power);
int result = static_cast<int>(part_low + part_high);
high_bits_ = 0;
low_bits_ -= part_low << power;
return result;
}
}
bool IsZero() const {
return high_bits_ == 0 && low_bits_ == 0;
}
int BitAt(int position) {
if (position >= 64) {
return static_cast<int>(high_bits_ >> (position - 64)) & 1;
} else {
return static_cast<int>(low_bits_ >> position) & 1;
}
}
private:
static const uint64_t kMask32 = 0xFFFFFFFF;
// Value == (high_bits_ << 64) + low_bits_
uint64_t high_bits_;
uint64_t low_bits_;
};
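// Illustrative sketch (not part of the library sources) exercising the helper
// above: start from 2^64, multiply by 1000 and split off everything at or
// above 2^70. The expected values are worked out in the comments.
inline void UInt128UsageSketch() {
  UInt128 value(1, 0);                      // high == 1, low == 0, i.e. 2^64
  value.Multiply(1000);                     // now 1000 * 2^64
  int quotient = value.DivModPowerOf2(70);  // 1000 * 2^64 / 2^70 == 15
  // 'value' now holds the remainder 40 * 2^64 (since 1000 - 15 * 64 == 40).
  ASSERT(quotient == 15);
  ASSERT(!value.IsZero());
  ASSERT(value.BitAt(69) == 1);             // bit 5 of 40, shifted up by 64
  (void)quotient;
}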
static const int kDoubleSignificandSize = 53; // Includes the hidden bit.
static void FillDigits32FixedLength(uint32_t number, int requested_length,
Vector<char> buffer, int* length) {
for (int i = requested_length - 1; i >= 0; --i) {
buffer[(*length) + i] = '0' + number % 10;
number /= 10;
}
*length += requested_length;
}
static void FillDigits32(uint32_t number, Vector<char> buffer, int* length) {
int number_length = 0;
// We fill the digits in reverse order and exchange them afterwards.
while (number != 0) {
int digit = number % 10;
number /= 10;
buffer[(*length) + number_length] = static_cast<char>('0' + digit);
number_length++;
}
// Exchange the digits.
int i = *length;
int j = *length + number_length - 1;
while (i < j) {
char tmp = buffer[i];
buffer[i] = buffer[j];
buffer[j] = tmp;
i++;
j--;
}
*length += number_length;
}
static void FillDigits64FixedLength(uint64_t number,
Vector<char> buffer, int* length) {
const uint32_t kTen7 = 10000000;
// For efficiency cut the number into 3 uint32_t parts, and print those.
uint32_t part2 = static_cast<uint32_t>(number % kTen7);
number /= kTen7;
uint32_t part1 = static_cast<uint32_t>(number % kTen7);
uint32_t part0 = static_cast<uint32_t>(number / kTen7);
FillDigits32FixedLength(part0, 3, buffer, length);
FillDigits32FixedLength(part1, 7, buffer, length);
FillDigits32FixedLength(part2, 7, buffer, length);
}
static void FillDigits64(uint64_t number, Vector<char> buffer, int* length) {
const uint32_t kTen7 = 10000000;
// For efficiency cut the number into 3 uint32_t parts, and print those.
uint32_t part2 = static_cast<uint32_t>(number % kTen7);
number /= kTen7;
uint32_t part1 = static_cast<uint32_t>(number % kTen7);
uint32_t part0 = static_cast<uint32_t>(number / kTen7);
if (part0 != 0) {
FillDigits32(part0, buffer, length);
FillDigits32FixedLength(part1, 7, buffer, length);
FillDigits32FixedLength(part2, 7, buffer, length);
} else if (part1 != 0) {
FillDigits32(part1, buffer, length);
FillDigits32FixedLength(part2, 7, buffer, length);
} else {
FillDigits32(part2, buffer, length);
}
}
static void RoundUp(Vector<char> buffer, int* length, int* decimal_point) {
// An empty buffer represents 0.
if (*length == 0) {
buffer[0] = '1';
*decimal_point = 1;
*length = 1;
return;
}
// Round up the last digit and propagate the carry until we either find a
// digit that is not '9' or reach the first digit.
buffer[(*length) - 1]++;
for (int i = (*length) - 1; i > 0; --i) {
if (buffer[i] != '0' + 10) {
return;
}
buffer[i] = '0';
buffer[i - 1]++;
}
// If the first digit is now '0' + 10, we would need to set it to '0' and add
// a '1' in front. However we reach the first digit only if all following
// digits had been '9' before rounding up. Now all trailing digits are '0' and
// we simply switch the first digit to '1' and update the decimal-point
// (indicating that the point is now one digit to the right).
if (buffer[0] == '0' + 10) {
buffer[0] = '1';
(*decimal_point)++;
}
}
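// Worked example (illustrative, not part of the library sources): rounding up
// a buffer containing "99" with decimal_point == 2 carries through both '9's
// and leaves "10" with decimal_point == 3, i.e. the represented value goes
// from 99 to 100 without growing the digit count.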
// The given fractionals number represents a fixed-point number with binary
// point at bit (-exponent).
// Preconditions:
// -128 <= exponent <= 0.
// 0 <= fractionals * 2^exponent < 1
// The buffer holds the result.
// The function will round its result. During the rounding-process digits not
// generated by this function might be updated, and the decimal-point variable
// might be updated. If this function generates the digits 99 and the buffer
// already contained "199" (thus yielding a buffer of "19999") then a
// rounding-up will change the contents of the buffer to "20000".
static void FillFractionals(uint64_t fractionals, int exponent,
int fractional_count, Vector<char> buffer,
int* length, int* decimal_point) {
ASSERT(-128 <= exponent && exponent <= 0);
// 'fractionals' is a fixed-point number, with binary point at bit
// (-exponent). Inside the function the non-converted remainder of fractionals
// is a fixed-point number, with binary point at bit 'point'.
if (-exponent <= 64) {
// One 64 bit number is sufficient.
ASSERT(fractionals >> 56 == 0);
int point = -exponent;
for (int i = 0; i < fractional_count; ++i) {
if (fractionals == 0) break;
// Instead of multiplying by 10 we multiply by 5 and adjust the point
// location. This way the fractionals variable will not overflow.
// Invariant at the beginning of the loop: fractionals < 2^point.
// Initially we have: point <= 64 and fractionals < 2^56
// After each iteration the point is decremented by one.
// Note that 5^3 = 125 < 128 = 2^7.
// Therefore three iterations of this loop will not overflow fractionals
// (even without the subtraction at the end of the loop body). At this
// time point will satisfy point <= 61 and therefore fractionals < 2^point
// and any further multiplication of fractionals by 5 will not overflow.
fractionals *= 5;
point--;
int digit = static_cast<int>(fractionals >> point);
ASSERT(digit <= 9);
buffer[*length] = static_cast<char>('0' + digit);
(*length)++;
fractionals -= static_cast<uint64_t>(digit) << point;
}
// If the first bit after the point is set we have to round up.
if (((fractionals >> (point - 1)) & 1) == 1) {
RoundUp(buffer, length, decimal_point);
}
} else { // We need 128 bits.
ASSERT(64 < -exponent && -exponent <= 128);
UInt128 fractionals128 = UInt128(fractionals, 0);
fractionals128.Shift(-exponent - 64);
int point = 128;
for (int i = 0; i < fractional_count; ++i) {
if (fractionals128.IsZero()) break;
// As before: instead of multiplying by 10 we multiply by 5 and adjust the
// point location.
// This multiplication will not overflow for the same reasons as before.
fractionals128.Multiply(5);
point--;
int digit = fractionals128.DivModPowerOf2(point);
ASSERT(digit <= 9);
buffer[*length] = static_cast<char>('0' + digit);
(*length)++;
}
if (fractionals128.BitAt(point - 1) == 1) {
RoundUp(buffer, length, decimal_point);
}
}
}
// Removes leading and trailing zeros.
// If leading zeros are removed then the decimal point position is adjusted.
static void TrimZeros(Vector<char> buffer, int* length, int* decimal_point) {
while (*length > 0 && buffer[(*length) - 1] == '0') {
(*length)--;
}
int first_non_zero = 0;
while (first_non_zero < *length && buffer[first_non_zero] == '0') {
first_non_zero++;
}
if (first_non_zero != 0) {
for (int i = first_non_zero; i < *length; ++i) {
buffer[i - first_non_zero] = buffer[i];
}
*length -= first_non_zero;
*decimal_point -= first_non_zero;
}
}
bool FastFixedDtoa(double v,
int fractional_count,
Vector<char> buffer,
int* length,
int* decimal_point) {
const uint32_t kMaxUInt32 = 0xFFFFFFFF;
uint64_t significand = Double(v).Significand();
int exponent = Double(v).Exponent();
// v = significand * 2^exponent (with significand a 53bit integer).
// If the exponent is larger than 20 (i.e. we may have a 73bit number) then we
// don't know how to compute the representation. 2^73 ~= 9.5*10^21.
// If necessary this limit could probably be increased, but we don't need
// more.
if (exponent > 20) return false;
if (fractional_count > 20) return false;
*length = 0;
// At most kDoubleSignificandSize bits of the significand are non-zero.
// Given a 64 bit integer we have 11 0s followed by 53 potentially non-zero
// bits: 0..11*..0xxx..53*..xx
if (exponent + kDoubleSignificandSize > 64) {
// The exponent must be > 11.
//
// We know that v = significand * 2^exponent.
// And the exponent > 11.
// We simplify the task by dividing v by 10^17.
// The quotient delivers the first digits, and the remainder fits into a 64
// bit number.
// Dividing by 10^17 is equivalent to dividing by 5^17*2^17.
const uint64_t kFive17 = UINT64_2PART_C(0xB1, A2BC2EC5); // 5^17
uint64_t divisor = kFive17;
int divisor_power = 17;
uint64_t dividend = significand;
uint32_t quotient;
uint64_t remainder;
// Let v = f * 2^e with f == significand and e == exponent.
// Then need q (quotient) and r (remainder) as follows:
// v = q * 10^17 + r
// f * 2^e = q * 10^17 + r
// f * 2^e = q * 5^17 * 2^17 + r
// If e > 17 then
// f * 2^(e-17) = q * 5^17 + r/2^17
// else
// f = q * 5^17 * 2^(17-e) + r/2^e
if (exponent > divisor_power) {
// We only allow exponents of up to 20 and therefore (e - 17) <= 3
dividend <<= exponent - divisor_power;
quotient = static_cast<uint32_t>(dividend / divisor);
remainder = (dividend % divisor) << divisor_power;
} else {
divisor <<= divisor_power - exponent;
quotient = static_cast<uint32_t>(dividend / divisor);
remainder = (dividend % divisor) << exponent;
}
FillDigits32(quotient, buffer, length);
FillDigits64FixedLength(remainder, buffer, length);
*decimal_point = *length;
} else if (exponent >= 0) {
// 0 <= exponent <= 11
significand <<= exponent;
FillDigits64(significand, buffer, length);
*decimal_point = *length;
} else if (exponent > -kDoubleSignificandSize) {
// We have to cut the number.
uint64_t integrals = significand >> -exponent;
uint64_t fractionals = significand - (integrals << -exponent);
if (integrals > kMaxUInt32) {
FillDigits64(integrals, buffer, length);
} else {
FillDigits32(static_cast<uint32_t>(integrals), buffer, length);
}
*decimal_point = *length;
FillFractionals(fractionals, exponent, fractional_count,
buffer, length, decimal_point);
} else if (exponent < -128) {
// This configuration (with at most 20 digits) means that all digits must be
// 0.
ASSERT(fractional_count <= 20);
buffer[0] = '\0';
*length = 0;
*decimal_point = -fractional_count;
} else {
*decimal_point = 0;
FillFractionals(significand, exponent, fractional_count,
buffer, length, decimal_point);
}
TrimZeros(buffer, length, decimal_point);
buffer[*length] = '\0';
if ((*length) == 0) {
// The string is empty and the decimal_point thus has no importance. Mimic
// Gay's dtoa and set it to -fractional_count.
*decimal_point = -fractional_count;
}
return true;
}
} // namespace double_conversion

View File

@ -0,0 +1,56 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef DOUBLE_CONVERSION_FIXED_DTOA_H_
#define DOUBLE_CONVERSION_FIXED_DTOA_H_
#include "utils.h"
namespace double_conversion {
// Produces digits necessary to print a given number with
// 'fractional_count' digits after the decimal point.
// The buffer must be big enough to hold the result plus one terminating null
// character.
//
// The produced digits might be too short in which case the caller has to fill
// the gaps with '0's.
// Example: FastFixedDtoa(0.001, 5, ...) is allowed to return buffer = "1", and
// decimal_point = -2.
// Halfway cases are rounded towards +/-Infinity (away from 0). The call
// FastFixedDtoa(0.15, 2, ...) thus returns buffer = "2", decimal_point = 0.
// The returned buffer may contain digits that would be truncated from the
// shortest representation of the input.
//
// This method only works for some parameters. If it can't handle the input it
// returns false. The output is null-terminated when the function succeeds.
bool FastFixedDtoa(double v, int fractional_count,
Vector<char> buffer, int* length, int* decimal_point);
} // namespace double_conversion
#endif // DOUBLE_CONVERSION_FIXED_DTOA_H_
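
For orientation, a minimal caller of FastFixedDtoa might look like the sketch
below. It is illustrative only: the #include path and the buffer size are
assumptions about how this vendored copy is consumed, not part of the library.

#include <cstdio>
#include "fixed-dtoa.h"  // assumed include path for the vendored sources

int main() {
  char digits[64];
  int length = 0;
  int decimal_point = 0;
  double_conversion::Vector<char> buffer(digits, static_cast<int>(sizeof(digits)));
  // As documented above, the digits may come back "short": 0.001 with five
  // fractional digits may be returned as "1" with decimal_point == -2, and
  // the caller pads with zeros to obtain "0.001".
  if (double_conversion::FastFixedDtoa(0.001, 5, buffer, &length,
                                       &decimal_point)) {
    std::printf("digits=\"%s\" length=%d decimal_point=%d\n",
                digits, length, decimal_point);
  }
  return 0;
}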

View File

@ -0,0 +1,402 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef DOUBLE_CONVERSION_DOUBLE_H_
#define DOUBLE_CONVERSION_DOUBLE_H_
#include "diy-fp.h"
namespace double_conversion {
// We assume that doubles and uint64_t have the same endianness.
static uint64_t double_to_uint64(double d) { return BitCast<uint64_t>(d); }
static double uint64_to_double(uint64_t d64) { return BitCast<double>(d64); }
static uint32_t float_to_uint32(float f) { return BitCast<uint32_t>(f); }
static float uint32_to_float(uint32_t d32) { return BitCast<float>(d32); }
// Helper functions for doubles.
class Double {
public:
static const uint64_t kSignMask = UINT64_2PART_C(0x80000000, 00000000);
static const uint64_t kExponentMask = UINT64_2PART_C(0x7FF00000, 00000000);
static const uint64_t kSignificandMask = UINT64_2PART_C(0x000FFFFF, FFFFFFFF);
static const uint64_t kHiddenBit = UINT64_2PART_C(0x00100000, 00000000);
static const int kPhysicalSignificandSize = 52; // Excludes the hidden bit.
static const int kSignificandSize = 53;
Double() : d64_(0) {}
explicit Double(double d) : d64_(double_to_uint64(d)) {}
explicit Double(uint64_t d64) : d64_(d64) {}
explicit Double(DiyFp diy_fp)
: d64_(DiyFpToUint64(diy_fp)) {}
// The value encoded by this Double must be greater or equal to +0.0.
// It must not be special (infinity, or NaN).
DiyFp AsDiyFp() const {
ASSERT(Sign() > 0);
ASSERT(!IsSpecial());
return DiyFp(Significand(), Exponent());
}
// The value encoded by this Double must be strictly greater than 0.
DiyFp AsNormalizedDiyFp() const {
ASSERT(value() > 0.0);
uint64_t f = Significand();
int e = Exponent();
// The current double could be a denormal.
while ((f & kHiddenBit) == 0) {
f <<= 1;
e--;
}
// Do the final shifts in one go.
f <<= DiyFp::kSignificandSize - kSignificandSize;
e -= DiyFp::kSignificandSize - kSignificandSize;
return DiyFp(f, e);
}
// Returns the double's bits as a uint64.
uint64_t AsUint64() const {
return d64_;
}
// Returns the next greater double. Returns +infinity on input +infinity.
double NextDouble() const {
if (d64_ == kInfinity) return Double(kInfinity).value();
if (Sign() < 0 && Significand() == 0) {
// -0.0
return 0.0;
}
if (Sign() < 0) {
return Double(d64_ - 1).value();
} else {
return Double(d64_ + 1).value();
}
}
double PreviousDouble() const {
if (d64_ == (kInfinity | kSignMask)) return -Double::Infinity();
if (Sign() < 0) {
return Double(d64_ + 1).value();
} else {
if (Significand() == 0) return -0.0;
return Double(d64_ - 1).value();
}
}
int Exponent() const {
if (IsDenormal()) return kDenormalExponent;
uint64_t d64 = AsUint64();
int biased_e =
static_cast<int>((d64 & kExponentMask) >> kPhysicalSignificandSize);
return biased_e - kExponentBias;
}
uint64_t Significand() const {
uint64_t d64 = AsUint64();
uint64_t significand = d64 & kSignificandMask;
if (!IsDenormal()) {
return significand + kHiddenBit;
} else {
return significand;
}
}
// Returns true if the double is a denormal.
bool IsDenormal() const {
uint64_t d64 = AsUint64();
return (d64 & kExponentMask) == 0;
}
// We consider denormals not to be special.
// Hence only Infinity and NaN are special.
bool IsSpecial() const {
uint64_t d64 = AsUint64();
return (d64 & kExponentMask) == kExponentMask;
}
bool IsNan() const {
uint64_t d64 = AsUint64();
return ((d64 & kExponentMask) == kExponentMask) &&
((d64 & kSignificandMask) != 0);
}
bool IsInfinite() const {
uint64_t d64 = AsUint64();
return ((d64 & kExponentMask) == kExponentMask) &&
((d64 & kSignificandMask) == 0);
}
int Sign() const {
uint64_t d64 = AsUint64();
return (d64 & kSignMask) == 0? 1: -1;
}
// Precondition: the value encoded by this Double must be greater than or
// equal to +0.0.
DiyFp UpperBoundary() const {
ASSERT(Sign() > 0);
return DiyFp(Significand() * 2 + 1, Exponent() - 1);
}
// Computes the two boundaries of this.
// The bigger boundary (m_plus) is normalized. The lower boundary has the same
// exponent as m_plus.
// Precondition: the value encoded by this Double must be greater than 0.
void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const {
ASSERT(value() > 0.0);
DiyFp v = this->AsDiyFp();
DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1));
DiyFp m_minus;
if (LowerBoundaryIsCloser()) {
m_minus = DiyFp((v.f() << 2) - 1, v.e() - 2);
} else {
m_minus = DiyFp((v.f() << 1) - 1, v.e() - 1);
}
m_minus.set_f(m_minus.f() << (m_minus.e() - m_plus.e()));
m_minus.set_e(m_plus.e());
*out_m_plus = m_plus;
*out_m_minus = m_minus;
}
bool LowerBoundaryIsCloser() const {
// If the significand is of the form f == 2^(p-1) (i.e. the value is a power
// of two) then the lower boundary is closer.
// Think of v = 1000e10 and v- = 9999e9.
// Then the boundary (== (v - v-)/2) is not just at a distance of 1e9 but
// at a distance of 1e8.
// The only exception is for the smallest normal: the largest denormal is
// at the same distance as its successor.
// Note: denormals have the same exponent as the smallest normals.
bool physical_significand_is_zero = ((AsUint64() & kSignificandMask) == 0);
return physical_significand_is_zero && (Exponent() != kDenormalExponent);
}
double value() const { return uint64_to_double(d64_); }
// Returns the significand size for a given order of magnitude.
// If v = f*2^e with 2^(p-1) <= f < 2^p then p+e is v's order of magnitude.
// This function returns the number of significant binary digits v will have
// once it's encoded into a double. In almost all cases this is equal to
// kSignificandSize. The only exceptions are denormals. They start with
// leading zeroes and their effective significand-size is hence smaller.
static int SignificandSizeForOrderOfMagnitude(int order) {
if (order >= (kDenormalExponent + kSignificandSize)) {
return kSignificandSize;
}
if (order <= kDenormalExponent) return 0;
return order - kDenormalExponent;
}
static double Infinity() {
return Double(kInfinity).value();
}
static double NaN() {
return Double(kNaN).value();
}
private:
static const int kExponentBias = 0x3FF + kPhysicalSignificandSize;
static const int kDenormalExponent = -kExponentBias + 1;
static const int kMaxExponent = 0x7FF - kExponentBias;
static const uint64_t kInfinity = UINT64_2PART_C(0x7FF00000, 00000000);
static const uint64_t kNaN = UINT64_2PART_C(0x7FF80000, 00000000);
const uint64_t d64_;
static uint64_t DiyFpToUint64(DiyFp diy_fp) {
uint64_t significand = diy_fp.f();
int exponent = diy_fp.e();
while (significand > kHiddenBit + kSignificandMask) {
significand >>= 1;
exponent++;
}
if (exponent >= kMaxExponent) {
return kInfinity;
}
if (exponent < kDenormalExponent) {
return 0;
}
while (exponent > kDenormalExponent && (significand & kHiddenBit) == 0) {
significand <<= 1;
exponent--;
}
uint64_t biased_exponent;
if (exponent == kDenormalExponent && (significand & kHiddenBit) == 0) {
biased_exponent = 0;
} else {
biased_exponent = static_cast<uint64_t>(exponent + kExponentBias);
}
return (significand & kSignificandMask) |
(biased_exponent << kPhysicalSignificandSize);
}
DISALLOW_COPY_AND_ASSIGN(Double);
};
class Single {
public:
static const uint32_t kSignMask = 0x80000000;
static const uint32_t kExponentMask = 0x7F800000;
static const uint32_t kSignificandMask = 0x007FFFFF;
static const uint32_t kHiddenBit = 0x00800000;
static const int kPhysicalSignificandSize = 23; // Excludes the hidden bit.
static const int kSignificandSize = 24;
Single() : d32_(0) {}
explicit Single(float f) : d32_(float_to_uint32(f)) {}
explicit Single(uint32_t d32) : d32_(d32) {}
// The value encoded by this Single must be greater or equal to +0.0.
// It must not be special (infinity, or NaN).
DiyFp AsDiyFp() const {
ASSERT(Sign() > 0);
ASSERT(!IsSpecial());
return DiyFp(Significand(), Exponent());
}
// Returns the single's bits as a uint32.
uint32_t AsUint32() const {
return d32_;
}
int Exponent() const {
if (IsDenormal()) return kDenormalExponent;
uint32_t d32 = AsUint32();
int biased_e =
static_cast<int>((d32 & kExponentMask) >> kPhysicalSignificandSize);
return biased_e - kExponentBias;
}
uint32_t Significand() const {
uint32_t d32 = AsUint32();
uint32_t significand = d32 & kSignificandMask;
if (!IsDenormal()) {
return significand + kHiddenBit;
} else {
return significand;
}
}
// Returns true if the single is a denormal.
bool IsDenormal() const {
uint32_t d32 = AsUint32();
return (d32 & kExponentMask) == 0;
}
// We consider denormals not to be special.
// Hence only Infinity and NaN are special.
bool IsSpecial() const {
uint32_t d32 = AsUint32();
return (d32 & kExponentMask) == kExponentMask;
}
bool IsNan() const {
uint32_t d32 = AsUint32();
return ((d32 & kExponentMask) == kExponentMask) &&
((d32 & kSignificandMask) != 0);
}
bool IsInfinite() const {
uint32_t d32 = AsUint32();
return ((d32 & kExponentMask) == kExponentMask) &&
((d32 & kSignificandMask) == 0);
}
int Sign() const {
uint32_t d32 = AsUint32();
return (d32 & kSignMask) == 0? 1: -1;
}
// Computes the two boundaries of this.
// The bigger boundary (m_plus) is normalized. The lower boundary has the same
// exponent as m_plus.
// Precondition: the value encoded by this Single must be greater than 0.
void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const {
ASSERT(value() > 0.0);
DiyFp v = this->AsDiyFp();
DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1));
DiyFp m_minus;
if (LowerBoundaryIsCloser()) {
m_minus = DiyFp((v.f() << 2) - 1, v.e() - 2);
} else {
m_minus = DiyFp((v.f() << 1) - 1, v.e() - 1);
}
m_minus.set_f(m_minus.f() << (m_minus.e() - m_plus.e()));
m_minus.set_e(m_plus.e());
*out_m_plus = m_plus;
*out_m_minus = m_minus;
}
// Precondition: the value encoded by this Single must be greater than or
// equal to +0.0.
DiyFp UpperBoundary() const {
ASSERT(Sign() > 0);
return DiyFp(Significand() * 2 + 1, Exponent() - 1);
}
bool LowerBoundaryIsCloser() const {
// If the significand is of the form f == 2^(p-1) (i.e. the value is a power
// of two) then the lower boundary is closer.
// Think of v = 1000e10 and v- = 9999e9.
// Then the boundary (== (v - v-)/2) is not just at a distance of 1e9 but
// at a distance of 1e8.
// The only exception is for the smallest normal: the largest denormal is
// at the same distance as its successor.
// Note: denormals have the same exponent as the smallest normals.
bool physical_significand_is_zero = ((AsUint32() & kSignificandMask) == 0);
return physical_significand_is_zero && (Exponent() != kDenormalExponent);
}
float value() const { return uint32_to_float(d32_); }
static float Infinity() {
return Single(kInfinity).value();
}
static float NaN() {
return Single(kNaN).value();
}
private:
static const int kExponentBias = 0x7F + kPhysicalSignificandSize;
static const int kDenormalExponent = -kExponentBias + 1;
static const int kMaxExponent = 0xFF - kExponentBias;
static const uint32_t kInfinity = 0x7F800000;
static const uint32_t kNaN = 0x7FC00000;
const uint32_t d32_;
DISALLOW_COPY_AND_ASSIGN(Single);
};
} // namespace double_conversion
#endif // DOUBLE_CONVERSION_DOUBLE_H_
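
The bit-twiddling in Double is easiest to follow with a concrete value. The
sketch below (again assuming the vendored header is reachable as "ieee.h")
decodes 1.5 and steps one unit in the last place upwards; it is illustrative
only.

#include <cinttypes>
#include <cstdio>
#include "ieee.h"  // assumed include path for the vendored sources

int main() {
  double_conversion::Double d(1.5);
  // With the hidden bit restored, 1.5 is encoded as a 53-bit significand
  // times a power of two: 1.5 == 6755399441055744 * 2^-52.
  std::printf("significand=%" PRIu64 " exponent=%d sign=%d\n",
              d.Significand(), d.Exponent(), d.Sign());
  // NextDouble() moves one ulp towards +infinity, here by 2^-52.
  std::printf("next(1.5) - 1.5 = %.17g\n", d.NextDouble() - 1.5);
  return 0;
}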

View File

@ -0,0 +1,555 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdarg.h>
#include <limits.h>
#include "strtod.h"
#include "bignum.h"
#include "cached-powers.h"
#include "ieee.h"
namespace double_conversion {
// 2^53 = 9007199254740992.
// Any integer with at most 15 decimal digits will hence fit into a double
// (which has a 53bit significand) without loss of precision.
static const int kMaxExactDoubleIntegerDecimalDigits = 15;
// 2^64 = 18446744073709551616 > 10^19
static const int kMaxUint64DecimalDigits = 19;
// Max double: 1.7976931348623157 x 10^308
// Min non-zero double: 4.9406564584124654 x 10^-324
// Any x >= 10^309 is interpreted as +infinity.
// Any x <= 10^-324 is interpreted as 0.
// Note that 2.5e-324 (despite being smaller than the min double) will be read
// as non-zero (equal to the min non-zero double).
static const int kMaxDecimalPower = 309;
static const int kMinDecimalPower = -324;
// 2^64 = 18446744073709551616
static const uint64_t kMaxUint64 = UINT64_2PART_C(0xFFFFFFFF, FFFFFFFF);
static const double exact_powers_of_ten[] = {
1.0, // 10^0
10.0,
100.0,
1000.0,
10000.0,
100000.0,
1000000.0,
10000000.0,
100000000.0,
1000000000.0,
10000000000.0, // 10^10
100000000000.0,
1000000000000.0,
10000000000000.0,
100000000000000.0,
1000000000000000.0,
10000000000000000.0,
100000000000000000.0,
1000000000000000000.0,
10000000000000000000.0,
100000000000000000000.0, // 10^20
1000000000000000000000.0,
// 10^22 = 0x21e19e0c9bab2400000 = 0x878678326eac9 * 2^22
10000000000000000000000.0
};
static const int kExactPowersOfTenSize = ARRAY_SIZE(exact_powers_of_ten);
// Maximum number of significant digits in the decimal representation.
// In fact the value is 772 (see conversions.cc), but to give us some margin
// we round up to 780.
static const int kMaxSignificantDecimalDigits = 780;
static Vector<const char> TrimLeadingZeros(Vector<const char> buffer) {
for (int i = 0; i < buffer.length(); i++) {
if (buffer[i] != '0') {
return buffer.SubVector(i, buffer.length());
}
}
return Vector<const char>(buffer.start(), 0);
}
static Vector<const char> TrimTrailingZeros(Vector<const char> buffer) {
for (int i = buffer.length() - 1; i >= 0; --i) {
if (buffer[i] != '0') {
return buffer.SubVector(0, i + 1);
}
}
return Vector<const char>(buffer.start(), 0);
}
static void CutToMaxSignificantDigits(Vector<const char> buffer,
int exponent,
char* significant_buffer,
int* significant_exponent) {
for (int i = 0; i < kMaxSignificantDecimalDigits - 1; ++i) {
significant_buffer[i] = buffer[i];
}
// The input buffer has been trimmed. Therefore the last digit must be
// different from '0'.
ASSERT(buffer[buffer.length() - 1] != '0');
// Set the last digit to be non-zero. This is sufficient to guarantee
// correct rounding.
significant_buffer[kMaxSignificantDecimalDigits - 1] = '1';
*significant_exponent =
exponent + (buffer.length() - kMaxSignificantDecimalDigits);
}
// Trims the buffer and cuts it to at most kMaxSignificantDecimalDigits.
// If possible the input-buffer is reused, but if the buffer needs to be
// modified (due to cutting), then the input needs to be copied into the
// buffer_copy_space.
static void TrimAndCut(Vector<const char> buffer, int exponent,
char* buffer_copy_space, int space_size,
Vector<const char>* trimmed, int* updated_exponent) {
Vector<const char> left_trimmed = TrimLeadingZeros(buffer);
Vector<const char> right_trimmed = TrimTrailingZeros(left_trimmed);
exponent += left_trimmed.length() - right_trimmed.length();
if (right_trimmed.length() > kMaxSignificantDecimalDigits) {
(void) space_size; // Mark variable as used.
ASSERT(space_size >= kMaxSignificantDecimalDigits);
CutToMaxSignificantDigits(right_trimmed, exponent,
buffer_copy_space, updated_exponent);
*trimmed = Vector<const char>(buffer_copy_space,
kMaxSignificantDecimalDigits);
} else {
*trimmed = right_trimmed;
*updated_exponent = exponent;
}
}
// Reads digits from the buffer and converts them to a uint64.
// Reads in as many digits as fit into a uint64.
// When the string starts with "1844674407370955161" no further digit is read.
// Since the largest uint64 is 18446744073709551615 it would still be possible
// to read another digit if it were less than or equal to 5, but this would
// complicate the code.
static uint64_t ReadUint64(Vector<const char> buffer,
int* number_of_read_digits) {
uint64_t result = 0;
int i = 0;
while (i < buffer.length() && result <= (kMaxUint64 / 10 - 1)) {
int digit = buffer[i++] - '0';
ASSERT(0 <= digit && digit <= 9);
result = 10 * result + digit;
}
*number_of_read_digits = i;
return result;
}
// Reads a DiyFp from the buffer.
// The returned DiyFp is not necessarily normalized.
// If remaining_decimals is zero then the returned DiyFp is accurate.
// Otherwise it has been rounded and has error of at most 1/2 ulp.
static void ReadDiyFp(Vector<const char> buffer,
DiyFp* result,
int* remaining_decimals) {
int read_digits;
uint64_t significand = ReadUint64(buffer, &read_digits);
if (buffer.length() == read_digits) {
*result = DiyFp(significand, 0);
*remaining_decimals = 0;
} else {
// Round the significand.
if (buffer[read_digits] >= '5') {
significand++;
}
// Compute the binary exponent.
int exponent = 0;
*result = DiyFp(significand, exponent);
*remaining_decimals = buffer.length() - read_digits;
}
}
static bool DoubleStrtod(Vector<const char> trimmed,
int exponent,
double* result) {
#if !defined(DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS)
// On x86 the floating-point stack can be 64 or 80 bits wide. If it is
// 80 bits wide (as is the case on Linux) then double-rounding occurs and the
// result is not accurate.
// We know that Windows32 uses 64 bits and is therefore accurate.
// Note that the ARM simulator is compiled for 32bits. It therefore exhibits
// the same problem.
return false;
#endif
if (trimmed.length() <= kMaxExactDoubleIntegerDecimalDigits) {
int read_digits;
// The trimmed input fits into a double.
// If the 10^exponent (resp. 10^-exponent) fits into a double too then we
// can compute the result-double simply by multiplying (resp. dividing) the
// two numbers.
// This is possible because IEEE guarantees that floating-point operations
// return the best possible approximation.
if (exponent < 0 && -exponent < kExactPowersOfTenSize) {
// 10^-exponent fits into a double.
*result = static_cast<double>(ReadUint64(trimmed, &read_digits));
ASSERT(read_digits == trimmed.length());
*result /= exact_powers_of_ten[-exponent];
return true;
}
if (0 <= exponent && exponent < kExactPowersOfTenSize) {
// 10^exponent fits into a double.
*result = static_cast<double>(ReadUint64(trimmed, &read_digits));
ASSERT(read_digits == trimmed.length());
*result *= exact_powers_of_ten[exponent];
return true;
}
int remaining_digits =
kMaxExactDoubleIntegerDecimalDigits - trimmed.length();
if ((0 <= exponent) &&
(exponent - remaining_digits < kExactPowersOfTenSize)) {
// The trimmed string was short and we can multiply it with
// 10^remaining_digits. As a result the remaining exponent now fits
// into a double too.
*result = static_cast<double>(ReadUint64(trimmed, &read_digits));
ASSERT(read_digits == trimmed.length());
*result *= exact_powers_of_ten[remaining_digits];
*result *= exact_powers_of_ten[exponent - remaining_digits];
return true;
}
}
return false;
}
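// Illustrative instance of the fast path in DoubleStrtod above: for the
// trimmed digits "355" with exponent -3 (i.e. 0.355), both the integer 355
// and the power 10^3 are exactly representable, so the single IEEE division
// 355.0 / 1000.0 already yields the correctly rounded double.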
// Returns 10^exponent as an exact DiyFp.
// The given exponent must be in the range [1; kDecimalExponentDistance[.
static DiyFp AdjustmentPowerOfTen(int exponent) {
ASSERT(0 < exponent);
ASSERT(exponent < PowersOfTenCache::kDecimalExponentDistance);
// Simply hardcode the remaining powers for the given decimal exponent
// distance.
ASSERT(PowersOfTenCache::kDecimalExponentDistance == 8);
switch (exponent) {
case 1: return DiyFp(UINT64_2PART_C(0xa0000000, 00000000), -60);
case 2: return DiyFp(UINT64_2PART_C(0xc8000000, 00000000), -57);
case 3: return DiyFp(UINT64_2PART_C(0xfa000000, 00000000), -54);
case 4: return DiyFp(UINT64_2PART_C(0x9c400000, 00000000), -50);
case 5: return DiyFp(UINT64_2PART_C(0xc3500000, 00000000), -47);
case 6: return DiyFp(UINT64_2PART_C(0xf4240000, 00000000), -44);
case 7: return DiyFp(UINT64_2PART_C(0x98968000, 00000000), -40);
default:
UNREACHABLE();
}
}
// If the function returns true then the result is the correct double.
// Otherwise it is either the correct double or the double that is just below
// the correct double.
static bool DiyFpStrtod(Vector<const char> buffer,
int exponent,
double* result) {
DiyFp input;
int remaining_decimals;
ReadDiyFp(buffer, &input, &remaining_decimals);
// Since we may have dropped some digits the input is not accurate.
// If remaining_decimals is different from 0 then the error is at most
// 0.5 ulp (unit in the last place).
// We don't want to deal with fractions and therefore keep a common
// denominator.
const int kDenominatorLog = 3;
const int kDenominator = 1 << kDenominatorLog;
// Move the remaining decimals into the exponent.
exponent += remaining_decimals;
uint64_t error = (remaining_decimals == 0 ? 0 : kDenominator / 2);
int old_e = input.e();
input.Normalize();
error <<= old_e - input.e();
ASSERT(exponent <= PowersOfTenCache::kMaxDecimalExponent);
if (exponent < PowersOfTenCache::kMinDecimalExponent) {
*result = 0.0;
return true;
}
DiyFp cached_power;
int cached_decimal_exponent;
PowersOfTenCache::GetCachedPowerForDecimalExponent(exponent,
&cached_power,
&cached_decimal_exponent);
if (cached_decimal_exponent != exponent) {
int adjustment_exponent = exponent - cached_decimal_exponent;
DiyFp adjustment_power = AdjustmentPowerOfTen(adjustment_exponent);
input.Multiply(adjustment_power);
if (kMaxUint64DecimalDigits - buffer.length() >= adjustment_exponent) {
// The product of input with the adjustment power fits into a 64 bit
// integer.
ASSERT(DiyFp::kSignificandSize == 64);
} else {
// The adjustment power is exact. There is hence only an error of 0.5.
error += kDenominator / 2;
}
}
input.Multiply(cached_power);
// The error introduced by a multiplication of a*b equals
// error_a + error_b + error_a*error_b/2^64 + 0.5
// Substituting a with 'input' and b with 'cached_power' we have
// error_b = 0.5 (all cached powers have an error of less than 0.5 ulp),
// error_ab = 0 or 1 / kDenominator > error_a*error_b/ 2^64
int error_b = kDenominator / 2;
int error_ab = (error == 0 ? 0 : 1); // We round up to 1.
int fixed_error = kDenominator / 2;
error += error_b + error_ab + fixed_error;
old_e = input.e();
input.Normalize();
error <<= old_e - input.e();
// See if the double's significand changes if we add/subtract the error.
int order_of_magnitude = DiyFp::kSignificandSize + input.e();
int effective_significand_size =
Double::SignificandSizeForOrderOfMagnitude(order_of_magnitude);
int precision_digits_count =
DiyFp::kSignificandSize - effective_significand_size;
if (precision_digits_count + kDenominatorLog >= DiyFp::kSignificandSize) {
// This can only happen for very small denormals. In this case the
// half-way multiplied by the denominator exceeds the range of an uint64.
// Simply shift everything to the right.
int shift_amount = (precision_digits_count + kDenominatorLog) -
DiyFp::kSignificandSize + 1;
input.set_f(input.f() >> shift_amount);
input.set_e(input.e() + shift_amount);
// We add 1 for the lost precision of error, and kDenominator for
// the lost precision of input.f().
error = (error >> shift_amount) + 1 + kDenominator;
precision_digits_count -= shift_amount;
}
// We use uint64_ts now. This only works if the DiyFp uses uint64_ts too.
ASSERT(DiyFp::kSignificandSize == 64);
ASSERT(precision_digits_count < 64);
uint64_t one64 = 1;
uint64_t precision_bits_mask = (one64 << precision_digits_count) - 1;
uint64_t precision_bits = input.f() & precision_bits_mask;
uint64_t half_way = one64 << (precision_digits_count - 1);
precision_bits *= kDenominator;
half_way *= kDenominator;
DiyFp rounded_input(input.f() >> precision_digits_count,
input.e() + precision_digits_count);
if (precision_bits >= half_way + error) {
rounded_input.set_f(rounded_input.f() + 1);
}
// If the last bits are too close to the half-way case then we are too
// inaccurate and round down. In this case we return false so that we can
// fall back to a more precise algorithm.
*result = Double(rounded_input).value();
if (half_way - error < precision_bits && precision_bits < half_way + error) {
// Too imprecise. The caller will have to fall back to a slower version.
// However the returned number is guaranteed to be either the correct
// double, or the next-lower double.
return false;
} else {
return true;
}
}
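// Recap of the error bookkeeping in DiyFpStrtod above (illustrative): with
// kDenominator == 8 every error term is expressed in eighths of a unit in
// the last place of input.f(). Dropped decimal digits cost at most 4/8, the
// rounded cached power another 4/8, the product of the two errors at most
// 1/8, and the rounding of the multiplication itself a fixed 4/8; the shifts
// rescale this bound whenever input is renormalized.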
// Returns
// - -1 if buffer*10^exponent < diy_fp.
// - 0 if buffer*10^exponent == diy_fp.
// - +1 if buffer*10^exponent > diy_fp.
// Preconditions:
// buffer.length() + exponent <= kMaxDecimalPower + 1
// buffer.length() + exponent > kMinDecimalPower
// buffer.length() <= kMaxDecimalSignificantDigits
static int CompareBufferWithDiyFp(Vector<const char> buffer,
int exponent,
DiyFp diy_fp) {
ASSERT(buffer.length() + exponent <= kMaxDecimalPower + 1);
ASSERT(buffer.length() + exponent > kMinDecimalPower);
ASSERT(buffer.length() <= kMaxSignificantDecimalDigits);
// Make sure that the Bignum will be able to hold all our numbers.
// Our Bignum implementation has a separate field for exponents. Shifts will
// consume at most one bigit (< 64 bits).
// log2(10) == 3.3219...
ASSERT(((kMaxDecimalPower + 1) * 333 / 100) < Bignum::kMaxSignificantBits);
Bignum buffer_bignum;
Bignum diy_fp_bignum;
buffer_bignum.AssignDecimalString(buffer);
diy_fp_bignum.AssignUInt64(diy_fp.f());
if (exponent >= 0) {
buffer_bignum.MultiplyByPowerOfTen(exponent);
} else {
diy_fp_bignum.MultiplyByPowerOfTen(-exponent);
}
if (diy_fp.e() > 0) {
diy_fp_bignum.ShiftLeft(diy_fp.e());
} else {
buffer_bignum.ShiftLeft(-diy_fp.e());
}
return Bignum::Compare(buffer_bignum, diy_fp_bignum);
}
// Returns true if the guess is the correct double.
// Returns false, when guess is either correct or the next-lower double.
static bool ComputeGuess(Vector<const char> trimmed, int exponent,
double* guess) {
if (trimmed.length() == 0) {
*guess = 0.0;
return true;
}
if (exponent + trimmed.length() - 1 >= kMaxDecimalPower) {
*guess = Double::Infinity();
return true;
}
if (exponent + trimmed.length() <= kMinDecimalPower) {
*guess = 0.0;
return true;
}
if (DoubleStrtod(trimmed, exponent, guess) ||
DiyFpStrtod(trimmed, exponent, guess)) {
return true;
}
if (*guess == Double::Infinity()) {
return true;
}
return false;
}
double Strtod(Vector<const char> buffer, int exponent) {
char copy_buffer[kMaxSignificantDecimalDigits];
Vector<const char> trimmed;
int updated_exponent;
TrimAndCut(buffer, exponent, copy_buffer, kMaxSignificantDecimalDigits,
&trimmed, &updated_exponent);
exponent = updated_exponent;
double guess;
bool is_correct = ComputeGuess(trimmed, exponent, &guess);
if (is_correct) return guess;
DiyFp upper_boundary = Double(guess).UpperBoundary();
int comparison = CompareBufferWithDiyFp(trimmed, exponent, upper_boundary);
if (comparison < 0) {
return guess;
} else if (comparison > 0) {
return Double(guess).NextDouble();
} else if ((Double(guess).Significand() & 1) == 0) {
// Round towards even.
return guess;
} else {
return Double(guess).NextDouble();
}
}
float Strtof(Vector<const char> buffer, int exponent) {
char copy_buffer[kMaxSignificantDecimalDigits];
Vector<const char> trimmed;
int updated_exponent;
TrimAndCut(buffer, exponent, copy_buffer, kMaxSignificantDecimalDigits,
&trimmed, &updated_exponent);
exponent = updated_exponent;
double double_guess;
bool is_correct = ComputeGuess(trimmed, exponent, &double_guess);
float float_guess = static_cast<float>(double_guess);
if (float_guess == double_guess) {
// This shortcut triggers for integer values.
return float_guess;
}
// We must catch double-rounding. Say the double has been rounded up, and is
// now a boundary of a float, and rounds up again. This is why we have to
// look at the previous double too.
// Example (in decimal numbers):
// input: 12349
// high-precision (4 digits): 1235
// low-precision (3 digits):
// when read from input: 123
// when rounded from high precision: 124.
// To do this we simply look at the neighbors of the correct result and see
// if they would round to the same float. If the guess is not correct we have
// to look at four values (since two different doubles could be the correct
// double).
double double_next = Double(double_guess).NextDouble();
double double_previous = Double(double_guess).PreviousDouble();
float f1 = static_cast<float>(double_previous);
float f2 = float_guess;
float f3 = static_cast<float>(double_next);
float f4;
if (is_correct) {
f4 = f3;
} else {
double double_next2 = Double(double_next).NextDouble();
f4 = static_cast<float>(double_next2);
}
(void) f2; // Mark variable as used.
ASSERT(f1 <= f2 && f2 <= f3 && f3 <= f4);
// If the guess doesn't lie near a single-precision boundary we can simply
// return its float-value.
if (f1 == f4) {
return float_guess;
}
ASSERT((f1 != f2 && f2 == f3 && f3 == f4) ||
(f1 == f2 && f2 != f3 && f3 == f4) ||
(f1 == f2 && f2 == f3 && f3 != f4));
// guess and next are the two possible candidates (in the same way that
// double_guess was the lower candidate for a double-precision guess).
float guess = f1;
float next = f4;
DiyFp upper_boundary;
if (guess == 0.0f) {
float min_float = 1e-45f;
upper_boundary = Double(static_cast<double>(min_float) / 2).AsDiyFp();
} else {
upper_boundary = Single(guess).UpperBoundary();
}
int comparison = CompareBufferWithDiyFp(trimmed, exponent, upper_boundary);
if (comparison < 0) {
return guess;
} else if (comparison > 0) {
return next;
} else if ((Single(guess).Significand() & 1) == 0) {
// Round towards even.
return guess;
} else {
return next;
}
}
} // namespace double_conversion

View File

@ -0,0 +1,45 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef DOUBLE_CONVERSION_STRTOD_H_
#define DOUBLE_CONVERSION_STRTOD_H_
#include "utils.h"
namespace double_conversion {
// The buffer must only contain digits in the range [0-9]. It must not
// contain a dot or a sign. It must not start with '0', and must not be empty.
double Strtod(Vector<const char> buffer, int exponent);
// The buffer must only contain digits in the range [0-9]. It must not
// contain a dot or a sign. It must not start with '0', and must not be empty.
float Strtof(Vector<const char> buffer, int exponent);
} // namespace double_conversion
#endif // DOUBLE_CONVERSION_STRTOD_H_
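
A minimal call site for these two entry points might look as follows; the
#include path is an assumption about how the vendored sources are consumed.
The buffer carries digits only, so "12345" with exponent -2 denotes 123.45.

#include <cstdio>
#include "strtod.h"  // assumed include path for the vendored sources

int main() {
  const char digits[] = "12345";
  double_conversion::Vector<const char> buffer(digits, 5);
  double d = double_conversion::Strtod(buffer, -2);  // 123.45 as a double
  float f = double_conversion::Strtof(buffer, -2);   // 123.45 as a float
  std::printf("%.17g %.9g\n", d, static_cast<double>(f));
  return 0;
}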

View File

@ -0,0 +1,324 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef DOUBLE_CONVERSION_UTILS_H_
#define DOUBLE_CONVERSION_UTILS_H_
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#ifndef ASSERT
#define ASSERT(condition) \
assert(condition);
#endif
#ifndef UNIMPLEMENTED
#define UNIMPLEMENTED() (abort())
#endif
#ifndef UNREACHABLE
#define UNREACHABLE() (abort())
#endif
// Double operations detection based on target architecture.
// Linux uses an 80-bit wide floating point stack on x86. This induces double
// rounding, which in turn leads to wrong results.
// An easy way to test if the floating-point operations are correct is to
// evaluate: 89255.0/1e22. If the floating-point stack is 64 bits wide then
// the result is equal to 89255e-22.
// The best way to test this, is to create a division-function and to compare
// the output of the division with the expected result. (Inlining must be
// disabled.)
// On Linux/x86, 89255e-22 != Div_double(89255.0/1e22)
#if defined(_M_X64) || defined(__x86_64__) || \
defined(__ARMEL__) || defined(__avr32__) || \
defined(__hppa__) || defined(__ia64__) || \
defined(__mips__) || \
defined(__powerpc__) || defined(__ppc__) || defined(__ppc64__) || \
defined(__sparc__) || defined(__sparc) || defined(__s390__) || \
defined(__SH4__) || defined(__alpha__) || \
defined(_MIPS_ARCH_MIPS32R2) || \
defined(__AARCH64EL__)
#define DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS 1
#elif defined(__mc68000__)
#undef DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS
#elif defined(_M_IX86) || defined(__i386__) || defined(__i386)
#if defined(_WIN32)
// Windows uses a 64bit wide floating point stack.
#define DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS 1
#else
#undef DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS
#endif // _WIN32
#else
#error Target architecture was not detected as supported by Double-Conversion.
#endif
#if defined(__GNUC__)
#define DOUBLE_CONVERSION_UNUSED __attribute__((unused))
#else
#define DOUBLE_CONVERSION_UNUSED
#endif
#if defined(_WIN32) && !defined(__MINGW32__)
typedef signed char int8_t;
typedef unsigned char uint8_t;
typedef short int16_t; // NOLINT
typedef unsigned short uint16_t; // NOLINT
typedef int int32_t;
typedef unsigned int uint32_t;
typedef __int64 int64_t;
typedef unsigned __int64 uint64_t;
// intptr_t and friends are defined in crtdefs.h through stdio.h.
#else
#include <stdint.h>
#endif
// The following macro works on both 32 and 64-bit platforms.
// Usage: instead of writing 0x1234567890123456
// write UINT64_2PART_C(0x12345678,90123456);
#define UINT64_2PART_C(a, b) (((static_cast<uint64_t>(a) << 32) + 0x##b##u))
// The expression ARRAY_SIZE(a) is a compile-time constant of type
// size_t which represents the number of elements of the given
// array. You should only use ARRAY_SIZE on statically allocated
// arrays.
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) \
((sizeof(a) / sizeof(*(a))) / \
static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
#endif
// A macro to disallow the evil copy constructor and operator= functions
// This should be used in the private: declarations for a class
#ifndef DISALLOW_COPY_AND_ASSIGN
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName&); \
void operator=(const TypeName&)
#endif
// A macro to disallow all the implicit constructors, namely the
// default constructor, copy constructor and operator= functions.
//
// This should be used in the private: declarations for a class
// that wants to prevent anyone from instantiating it. This is
// especially useful for classes containing only static methods.
#ifndef DISALLOW_IMPLICIT_CONSTRUCTORS
#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
TypeName(); \
DISALLOW_COPY_AND_ASSIGN(TypeName)
#endif
namespace double_conversion {
static const int kCharSize = sizeof(char);
// Returns the maximum of the two parameters.
template <typename T>
static T Max(T a, T b) {
return a < b ? b : a;
}
// Returns the minimum of the two parameters.
template <typename T>
static T Min(T a, T b) {
return a < b ? a : b;
}
inline int StrLength(const char* string) {
size_t length = strlen(string);
ASSERT(length == static_cast<size_t>(static_cast<int>(length)));
return static_cast<int>(length);
}
// This is a simplified version of V8's Vector class.
template <typename T>
class Vector {
public:
Vector() : start_(NULL), length_(0) {}
Vector(T* data, int length) : start_(data), length_(length) {
ASSERT(length == 0 || (length > 0 && data != NULL));
}
// Returns a vector using the same backing storage as this one,
// spanning from and including 'from', to but not including 'to'.
Vector<T> SubVector(int from, int to) {
ASSERT(to <= length_);
ASSERT(from < to);
ASSERT(0 <= from);
return Vector<T>(start() + from, to - from);
}
// Returns the length of the vector.
int length() const { return length_; }
// Returns whether or not the vector is empty.
bool is_empty() const { return length_ == 0; }
// Returns the pointer to the start of the data in the vector.
T* start() const { return start_; }
// Access individual vector elements - checks bounds in debug mode.
T& operator[](int index) const {
ASSERT(0 <= index && index < length_);
return start_[index];
}
T& first() { return start_[0]; }
T& last() { return start_[length_ - 1]; }
private:
T* start_;
int length_;
};
// Helper class for building result strings in a character buffer. The
// purpose of the class is to use safe operations that check the
// buffer bounds on all operations in debug mode.
class StringBuilder {
public:
StringBuilder(char* buffer, int size)
: buffer_(buffer, size), position_(0) { }
~StringBuilder() { if (!is_finalized()) Finalize(); }
int size() const { return buffer_.length(); }
// Get the current position in the builder.
int position() const {
ASSERT(!is_finalized());
return position_;
}
// Reset the position.
void Reset() { position_ = 0; }
// Add a single character to the builder. It is not allowed to add
// 0-characters; use the Finalize() method to terminate the string
// instead.
void AddCharacter(char c) {
ASSERT(c != '\0');
ASSERT(!is_finalized() && position_ < buffer_.length());
buffer_[position_++] = c;
}
// Add an entire string to the builder. Uses strlen() internally to
// compute the length of the input string.
void AddString(const char* s) {
AddSubstring(s, StrLength(s));
}
// Add the first 'n' characters of the given string 's' to the
// builder. The input string must have enough characters.
void AddSubstring(const char* s, int n) {
ASSERT(!is_finalized() && position_ + n < buffer_.length());
ASSERT(static_cast<size_t>(n) <= strlen(s));
memmove(&buffer_[position_], s, n * kCharSize);
position_ += n;
}
// Add character padding to the builder. If count is non-positive,
// nothing is added to the builder.
void AddPadding(char c, int count) {
for (int i = 0; i < count; i++) {
AddCharacter(c);
}
}
// Finalize the string by 0-terminating it and returning the buffer.
char* Finalize() {
ASSERT(!is_finalized() && position_ < buffer_.length());
buffer_[position_] = '\0';
// Make sure nobody managed to add a 0-character to the
// buffer while building the string.
ASSERT(strlen(buffer_.start()) == static_cast<size_t>(position_));
position_ = -1;
ASSERT(is_finalized());
return buffer_.start();
}
private:
Vector<char> buffer_;
int position_;
bool is_finalized() const { return position_ < 0; }
DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder);
};
// The type-based aliasing rule allows the compiler to assume that pointers of
// different types (for some definition of different) never alias each other.
// Thus the following code does not work:
//
// float f = foo();
// int fbits = *(int*)(&f);
//
// The compiler 'knows' that the int pointer can't refer to f since the types
// don't match, so the compiler may cache f in a register, leaving random data
// in fbits. Using C++ style casts makes no difference, however a pointer to
// char data is assumed to alias any other pointer. This is the 'memcpy
// exception'.
//
// Bit_cast uses the memcpy exception to move the bits from a variable of one
// type to a variable of another type. Of course the end result is likely to
// be implementation dependent. Most compilers (gcc-4.2 and MSVC 2005)
// will completely optimize BitCast away.
//
// There is an additional use for BitCast.
// Recent gccs will warn when they see casts that may result in breakage due to
// the type-based aliasing rule. If you have checked that there is no breakage
// you can use BitCast to cast one pointer type to another. This confuses gcc
// enough that it can no longer see that you have cast one pointer type to
// another thus avoiding the warning.
template <class Dest, class Source>
inline Dest BitCast(const Source& source) {
// Compile time assertion: sizeof(Dest) == sizeof(Source)
// A compile error here means your Dest and Source have different sizes.
DOUBLE_CONVERSION_UNUSED
typedef char VerifySizesAreEqual[sizeof(Dest) == sizeof(Source) ? 1 : -1];
Dest dest;
memmove(&dest, &source, sizeof(dest));
return dest;
}
template <class Dest, class Source>
inline Dest BitCast(Source* source) {
return BitCast<Dest>(reinterpret_cast<uintptr_t>(source));
}
} // namespace double_conversion
#endif // DOUBLE_CONVERSION_UTILS_H_
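
To make the helpers above concrete, here is a short sketch (include path
assumed, as before) that assembles a string with StringBuilder and reads a
double's bit pattern with BitCast.

#include <cinttypes>
#include <cstdio>
#include "utils.h"  // assumed include path for the vendored sources

int main() {
  using double_conversion::BitCast;
  using double_conversion::StringBuilder;

  // StringBuilder: bounds-checked (in debug builds) assembly of a C string.
  char storage[16];
  StringBuilder builder(storage, static_cast<int>(sizeof(storage)));
  builder.AddString("pi=");
  builder.AddCharacter('3');
  builder.AddSubstring(".14159", 3);        // copies only ".14"
  std::printf("%s\n", builder.Finalize());  // prints "pi=3.14"

  // BitCast: reinterpret the bits of a double without violating the
  // type-based aliasing rule; 1.0 is encoded as 0x3FF0000000000000.
  uint64_t bits = BitCast<uint64_t>(1.0);
  std::printf("0x%016" PRIX64 "\n", bits);
  return 0;
}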

ios/Pods/Folly/LICENSE generated Normal file
View File

@ -0,0 +1,177 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS

233
ios/Pods/Folly/README.md generated Normal file
View File

@ -0,0 +1,233 @@
Folly: Facebook Open-source Library
-----------------------------------
### What is `folly`?
Folly (acronymed loosely after Facebook Open Source Library) is a
library of C++11 components designed with practicality and efficiency
in mind. **Folly contains a variety of core library components used extensively
at Facebook**. In particular, it's often a dependency of Facebook's other
open source C++ efforts and a place where those projects can share code.
It complements (as opposed to competing against) offerings
such as Boost and of course `std`. In fact, we embark on defining our
own component only when something we need is either not available, or
does not meet the needed performance profile. We endeavor to remove
things from folly if or when `std` or Boost obsoletes them.
Performance concerns permeate much of Folly, sometimes leading to
designs that are more idiosyncratic than they would otherwise be (see
e.g. `PackedSyncPtr.h`, `SmallLocks.h`). Good performance at large
scale is a unifying theme in all of Folly.
### Logical Design
Folly is a collection of relatively independent components, some as
simple as a few symbols. There is no restriction on internal
dependencies, meaning that a given folly module may use any other
folly components.
All symbols are defined in the top-level namespace `folly`, except of
course macros. Macro names are ALL_UPPERCASE and should be prefixed
with `FOLLY_`. Namespace `folly` defines other internal namespaces
such as `internal` or `detail`. User code should not depend on symbols
in those namespaces.
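As a rough illustration of these conventions (a sketch, not part of folly itself; the `myapp` namespace and `twice` function are made up):
```
#include <folly/Portability.h>  // FOLLY_-prefixed macros such as FOLLY_ALWAYS_INLINE

namespace myapp {

// Library symbols live under folly::, macros are ALL_UPPERCASE with a FOLLY_
// prefix, and nothing under folly::detail should be relied on by user code.
FOLLY_ALWAYS_INLINE int twice(int x) {
  return 2 * x;
}

} // namespace myapp
```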
Folly has an `experimental` directory as well. This designation connotes
primarily that we feel the API may change heavily over time. This code,
typically, is still in heavy use and is well tested.
### Physical Design
At the top level Folly uses the classic "stuttering" scheme
`folly/folly` used by Boost and others. The first directory serves as
an installation root of the library (with possible versioning a la
`folly-1.0/`), and the second is to distinguish the library when
including files, e.g. `#include <folly/FBString.h>`.
The directory structure is flat (mimicking the namespace structure),
i.e. we don't have an elaborate directory hierarchy (it is possible
this will change in future versions). The subdirectory `experimental`
contains files that are used inside folly and possibly at Facebook but
not considered stable enough for client use. Your code should not use
files in `folly/experimental` lest it break when you update Folly.
The `folly/folly/test` subdirectory includes the unittests for all
components, usually named `ComponentXyzTest.cpp` for each
`ComponentXyz.*`. The `folly/folly/docs` directory contains
documentation.
### What's in it?
Because of folly's fairly flat structure, the best way to see what's in it
is to look at the headers in the [top-level `folly/` directory](https://github.com/facebook/folly/tree/master/folly). You can also
check the [`docs` folder](folly/docs) for documentation, starting with the
[overview](folly/docs/Overview.md).
Folly is published on Github at https://github.com/facebook/folly
### Build Notes
#### Dependencies
folly requires gcc 4.8+ and a version of boost compiled with C++11 support.
Please download googletest from
https://googletest.googlecode.com/files/gtest-1.7.0.zip and unzip it in the
folly/test subdirectory.
#### Ubuntu 12.04
This Ubuntu release is old and requires many upgrades. However, since Travis CI runs
on 12.04, `folly/build/deps_ubuntu_12.04.sh` is provided; it installs and upgrades all
the required packages.
#### Ubuntu 13.10
The following packages are required (feel free to cut and paste the apt-get
command below):
```
sudo apt-get install \
g++ \
automake \
autoconf \
autoconf-archive \
libtool \
libboost-all-dev \
libevent-dev \
libdouble-conversion-dev \
libgoogle-glog-dev \
libgflags-dev \
liblz4-dev \
liblzma-dev \
libsnappy-dev \
make \
zlib1g-dev \
binutils-dev \
libjemalloc-dev \
libssl-dev
```
If advanced debugging functionality is required:
```
sudo apt-get install \
libunwind8-dev \
libelf-dev \
libdwarf-dev
```
#### Ubuntu 14.04 LTS
The packages listed above for Ubuntu 13.10 are required, as well as:
```
sudo apt-get install \
libiberty-dev
```
The above packages are sufficient for Ubuntu 13.10 and Ubuntu 14.04.
In the folly directory, run
```
autoreconf -ivf
./configure
make
make check
sudo make install
```
#### OS X (Homebrew)
folly is available as a Formula and releases may be built via `brew install folly`.
You may also use `folly/build/bootstrap-osx-homebrew.sh` to build against `master`:
```
cd folly
./build/bootstrap-osx-homebrew.sh
make
make check
```
#### OS X (MacPorts)
Install the required packages from MacPorts:
```
sudo port install \
autoconf \
automake \
boost \
gflags \
git \
google-glog \
libevent \
libtool \
lz4 \
lzma \
scons \
snappy \
zlib
```
Download and install double-conversion:
```
git clone https://github.com/google/double-conversion.git
cd double-conversion
cmake -DBUILD_SHARED_LIBS=ON .
make
sudo make install
```
Download and install folly with the parameters listed below:
```
git clone https://github.com/facebook/folly.git
cd folly/folly
autoreconf -ivf
./configure CPPFLAGS="-I/opt/local/include" LDFLAGS="-L/opt/local/lib"
make
sudo make install
```
#### Other Linux distributions
- double-conversion (https://github.com/google/double-conversion)
Download and build double-conversion.
You may need to tell configure where to find it.
[double-conversion/] `ln -s src double-conversion`
[folly/] `./configure LDFLAGS=-L$DOUBLE_CONVERSION_HOME/ CPPFLAGS=-I$DOUBLE_CONVERSION_HOME/`
[folly/] `LD_LIBRARY_PATH=$DOUBLE_CONVERSION_HOME/ make`
- additional platform specific dependencies:
Fedora 21 64-bit
- gcc
- gcc-c++
- autoconf
- autoconf-archive
- automake
- boost-devel
- libtool
- lz4-devel
- lzma-devel
- snappy-devel
- zlib-devel
- glog-devel
- gflags-devel
- scons
- double-conversion-devel
- openssl-devel
- libevent-devel
Optional
- libdwarf-dev
- libelf-dev
- libunwind8-dev

124
ios/Pods/Folly/folly/ApplyTuple.h generated Normal file
View File

@ -0,0 +1,124 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Defines a function folly::applyTuple, which takes a function and a
* std::tuple of arguments and calls the function with those
* arguments.
*
* Example:
*
* int x = folly::applyTuple(std::plus<int>(), std::make_tuple(12, 12));
* ASSERT(x == 24);
*/
#pragma once
#include <functional>
#include <tuple>
#include <utility>
namespace folly {
//////////////////////////////////////////////////////////////////////
namespace detail {
namespace apply_tuple {
template <std::size_t...>
struct IndexSequence {};
template <std::size_t N, std::size_t... Is>
struct MakeIndexSequence : MakeIndexSequence<N - 1, N - 1, Is...> {};
template <std::size_t... Is>
struct MakeIndexSequence<0, Is...> : IndexSequence<Is...> {};
inline constexpr std::size_t sum() {
return 0;
}
template <typename... Args>
inline constexpr std::size_t sum(std::size_t v1, Args... vs) {
return v1 + sum(vs...);
}
template <typename... Tuples>
struct TupleSizeSum {
static constexpr auto value = sum(std::tuple_size<Tuples>::value...);
};
template <typename... Tuples>
using MakeIndexSequenceFromTuple = MakeIndexSequence<
TupleSizeSum<typename std::decay<Tuples>::type...>::value>;
// This is to allow using this with pointers to member functions,
// where the first argument in the tuple will be the this pointer.
template <class F>
inline constexpr F&& makeCallable(F&& f) {
return std::forward<F>(f);
}
template <class M, class C>
inline constexpr auto makeCallable(M(C::*d)) -> decltype(std::mem_fn(d)) {
return std::mem_fn(d);
}
template <class F, class Tuple, std::size_t... Indexes>
inline constexpr auto call(F&& f, Tuple&& t, IndexSequence<Indexes...>)
-> decltype(
std::forward<F>(f)(std::get<Indexes>(std::forward<Tuple>(t))...)) {
return std::forward<F>(f)(std::get<Indexes>(std::forward<Tuple>(t))...);
}
template <class Tuple, std::size_t... Indexes>
inline constexpr auto forwardTuple(Tuple&& t, IndexSequence<Indexes...>)
-> decltype(
std::forward_as_tuple(std::get<Indexes>(std::forward<Tuple>(t))...)) {
return std::forward_as_tuple(std::get<Indexes>(std::forward<Tuple>(t))...);
}
} // namespace apply_tuple
} // namespace detail
//////////////////////////////////////////////////////////////////////
/**
* Invoke a callable object with a set of arguments passed as a tuple, or a
* series of tuples
*
* Example: the following lines are equivalent
* func(1, 2, 3, "foo");
* applyTuple(func, std::make_tuple(1, 2, 3, "foo"));
* applyTuple(func, std::make_tuple(1, 2), std::make_tuple(3, "foo"));
*/
template <class F, class... Tuples>
inline constexpr auto applyTuple(F&& f, Tuples&&... t)
-> decltype(detail::apply_tuple::call(
detail::apply_tuple::makeCallable(std::forward<F>(f)),
std::tuple_cat(detail::apply_tuple::forwardTuple(
std::forward<Tuples>(t),
detail::apply_tuple::MakeIndexSequenceFromTuple<Tuples>{})...),
detail::apply_tuple::MakeIndexSequenceFromTuple<Tuples...>{})) {
return detail::apply_tuple::call(
detail::apply_tuple::makeCallable(std::forward<F>(f)),
std::tuple_cat(detail::apply_tuple::forwardTuple(
std::forward<Tuples>(t),
detail::apply_tuple::MakeIndexSequenceFromTuple<Tuples>{})...),
detail::apply_tuple::MakeIndexSequenceFromTuple<Tuples...>{});
}
//////////////////////////////////////////////////////////////////////
}
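A minimal usage sketch for the `folly::applyTuple` helper vendored above (the `Greeter` type and `main()` are illustrative only, not part of the Pod):
```
#include <folly/ApplyTuple.h>
#include <cassert>
#include <string>
#include <tuple>

struct Greeter {
  std::string greet(const std::string& name) const { return "hi " + name; }
};

int main() {
  // Plain callable, arguments packed into one tuple.
  int sum = folly::applyTuple([](int a, int b) { return a + b; },
                              std::make_tuple(12, 12));
  assert(sum == 24);

  // Pointer to member function: the object is the first tuple element,
  // handled by the std::mem_fn path in makeCallable().
  Greeter g;
  auto s = folly::applyTuple(&Greeter::greet,
                             std::make_tuple(g, std::string("folly")));
  assert(s == "hi folly");

  // Arguments may also be split across several tuples.
  int sum2 = folly::applyTuple([](int a, int b, int c) { return a + b + c; },
                               std::make_tuple(1, 2), std::make_tuple(3));
  assert(sum2 == 6);
  return 0;
}
```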

93
ios/Pods/Folly/folly/Arena-inl.h generated Normal file
View File

@ -0,0 +1,93 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FOLLY_ARENA_H_
#error This file may only be included from Arena.h
#endif
// Implementation of Arena.h functions
namespace folly {
template <class Alloc>
std::pair<typename Arena<Alloc>::Block*, size_t>
Arena<Alloc>::Block::allocate(Alloc& alloc, size_t size, bool allowSlack) {
size_t allocSize = sizeof(Block) + size;
if (allowSlack) {
allocSize = ArenaAllocatorTraits<Alloc>::goodSize(alloc, allocSize);
}
void* mem = alloc.allocate(allocSize);
return std::make_pair(new (mem) Block(), allocSize - sizeof(Block));
}
template <class Alloc>
void Arena<Alloc>::Block::deallocate(Alloc& alloc) {
this->~Block();
alloc.deallocate(this);
}
template <class Alloc>
void* Arena<Alloc>::allocateSlow(size_t size) {
std::pair<Block*, size_t> p;
char* start;
size_t allocSize = std::max(size, minBlockSize()) + sizeof(Block);
if (sizeLimit_ != kNoSizeLimit &&
allocSize > sizeLimit_ - totalAllocatedSize_) {
throw std::bad_alloc();
}
if (size > minBlockSize()) {
// Allocate a large block for this chunk only, put it at the back of the
// list so it doesn't get used for small allocations; don't change ptr_
// and end_, let them point into a normal block (or none, if they're
// null)
p = Block::allocate(alloc(), size, false);
start = p.first->start();
blocks_.push_back(*p.first);
} else {
// Allocate a normal sized block and carve out size bytes from it
p = Block::allocate(alloc(), minBlockSize(), true);
start = p.first->start();
blocks_.push_front(*p.first);
ptr_ = start + size;
end_ = start + p.second;
}
assert(p.second >= size);
totalAllocatedSize_ += p.second + sizeof(Block);
return start;
}
template <class Alloc>
void Arena<Alloc>::merge(Arena<Alloc>&& other) {
blocks_.splice_after(blocks_.before_begin(), other.blocks_);
other.blocks_.clear();
other.ptr_ = other.end_ = nullptr;
totalAllocatedSize_ += other.totalAllocatedSize_;
other.totalAllocatedSize_ = 0;
}
template <class Alloc>
Arena<Alloc>::~Arena() {
auto disposer = [this] (Block* b) { b->deallocate(this->alloc()); };
while (!blocks_.empty()) {
blocks_.pop_front_and_dispose(disposer);
}
}
} // namespace folly

245
ios/Pods/Folly/folly/Arena.h generated Normal file
View File

@ -0,0 +1,245 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#define FOLLY_ARENA_H_
#include <cassert>
#include <limits>
#include <stdexcept>
#include <utility>
#include <boost/intrusive/slist.hpp>
#include <folly/Conv.h>
#include <folly/Likely.h>
#include <folly/Malloc.h>
#include <folly/Memory.h>
namespace folly {
/**
* Simple arena: allocate memory which gets freed when the arena gets
* destroyed.
*
* The arena itself allocates memory using a custom allocator which provides
* the following interface (same as required by StlAllocator in StlAllocator.h)
*
* void* allocate(size_t size);
* Allocate a block of size bytes, properly aligned to the maximum
* alignment required on your system; throw std::bad_alloc if the
* allocation can't be satisfied.
*
* void deallocate(void* ptr);
* Deallocate a previously allocated block.
*
* You may also specialize ArenaAllocatorTraits for your allocator type to
* provide:
*
* size_t goodSize(const Allocator& alloc, size_t size) const;
* Return a size (>= the provided size) that is considered "good" for your
* allocator (for example, if your allocator allocates memory in 4MB
* chunks, size should be rounded up to 4MB). The provided value is
* guaranteed to be rounded up to a multiple of the maximum alignment
* required on your system; the returned value must be as well.
*
* An implementation that uses malloc() / free() is defined below, see SysArena.
*/
template <class Alloc> struct ArenaAllocatorTraits;
template <class Alloc>
class Arena {
public:
explicit Arena(const Alloc& alloc,
size_t minBlockSize = kDefaultMinBlockSize,
size_t sizeLimit = kNoSizeLimit,
size_t maxAlign = kDefaultMaxAlign)
: allocAndSize_(alloc, minBlockSize)
, ptr_(nullptr)
, end_(nullptr)
, totalAllocatedSize_(0)
, bytesUsed_(0)
, sizeLimit_(sizeLimit)
, maxAlign_(maxAlign) {
if ((maxAlign_ & (maxAlign_ - 1)) || maxAlign_ > alignof(Block)) {
throw std::invalid_argument(
folly::to<std::string>("Invalid maxAlign: ", maxAlign_));
}
}
~Arena();
void* allocate(size_t size) {
size = roundUp(size);
bytesUsed_ += size;
assert(ptr_ <= end_);
if (LIKELY((size_t)(end_ - ptr_) >= size)) {
// Fast path: there's enough room in the current block
char* r = ptr_;
ptr_ += size;
assert(isAligned(r));
return r;
}
// Not enough room in the current block
void* r = allocateSlow(size);
assert(isAligned(r));
return r;
}
void deallocate(void* /* p */) {
// Deallocate? Never!
}
// Transfer ownership of all memory allocated from "other" to "this".
void merge(Arena&& other);
// Gets the total memory used by the arena
size_t totalSize() const {
return totalAllocatedSize_ + sizeof(Arena);
}
// Gets the total number of "used" bytes, i.e. bytes that the arena users
// allocated via the calls to `allocate`. Doesn't include fragmentation, e.g.
// if block size is 4KB and you allocate 2 objects of 3KB in size,
// `bytesUsed()` will be 6KB, while `totalSize()` will be 8KB+.
size_t bytesUsed() const {
return bytesUsed_;
}
// not copyable
Arena(const Arena&) = delete;
Arena& operator=(const Arena&) = delete;
// movable
Arena(Arena&&) = default;
Arena& operator=(Arena&&) = default;
private:
struct Block;
typedef boost::intrusive::slist_member_hook<
boost::intrusive::tag<Arena>> BlockLink;
struct FOLLY_ALIGNED_MAX Block {
BlockLink link;
// Allocate a block with at least size bytes of storage.
// If allowSlack is true, allocate more than size bytes if convenient
// (via ArenaAllocatorTraits::goodSize()) as we'll try to pack small
// allocations in this block.
static std::pair<Block*, size_t> allocate(
Alloc& alloc, size_t size, bool allowSlack);
void deallocate(Alloc& alloc);
char* start() {
return reinterpret_cast<char*>(this + 1);
}
private:
Block() = default;
~Block() = default;
};
public:
static constexpr size_t kDefaultMinBlockSize = 4096 - sizeof(Block);
static constexpr size_t kNoSizeLimit = 0;
static constexpr size_t kDefaultMaxAlign = alignof(Block);
static constexpr size_t kBlockOverhead = sizeof(Block);
private:
bool isAligned(uintptr_t address) const {
return (address & (maxAlign_ - 1)) == 0;
}
bool isAligned(void* p) const {
return isAligned(reinterpret_cast<uintptr_t>(p));
}
// Round up size so it's properly aligned
size_t roundUp(size_t size) const {
return (size + maxAlign_ - 1) & ~(maxAlign_ - 1);
}
// cache_last<true> makes the list keep a pointer to the last element, so we
// have push_back() and constant time splice_after()
typedef boost::intrusive::slist<
Block,
boost::intrusive::member_hook<Block, BlockLink, &Block::link>,
boost::intrusive::constant_time_size<false>,
boost::intrusive::cache_last<true>> BlockList;
void* allocateSlow(size_t size);
// Empty member optimization: package Alloc with a non-empty member
// in case Alloc is empty (as it is in the case of SysAlloc).
struct AllocAndSize : public Alloc {
explicit AllocAndSize(const Alloc& a, size_t s)
: Alloc(a), minBlockSize(s) {
}
size_t minBlockSize;
};
size_t minBlockSize() const {
return allocAndSize_.minBlockSize;
}
Alloc& alloc() { return allocAndSize_; }
const Alloc& alloc() const { return allocAndSize_; }
AllocAndSize allocAndSize_;
BlockList blocks_;
char* ptr_;
char* end_;
size_t totalAllocatedSize_;
size_t bytesUsed_;
const size_t sizeLimit_;
const size_t maxAlign_;
};
template <class Alloc>
struct IsArenaAllocator<Arena<Alloc>> : std::true_type { };
/**
* By default, don't pad the given size.
*/
template <class Alloc>
struct ArenaAllocatorTraits {
static size_t goodSize(const Alloc& /* alloc */, size_t size) { return size; }
};
template <>
struct ArenaAllocatorTraits<SysAlloc> {
static size_t goodSize(const SysAlloc& /* alloc */, size_t size) {
return goodMallocSize(size);
}
};
/**
* Arena that uses the system allocator (malloc / free)
*/
class SysArena : public Arena<SysAlloc> {
public:
explicit SysArena(size_t minBlockSize = kDefaultMinBlockSize,
size_t sizeLimit = kNoSizeLimit,
size_t maxAlign = kDefaultMaxAlign)
: Arena<SysAlloc>(SysAlloc(), minBlockSize, sizeLimit, maxAlign) {
}
};
template <>
struct IsArenaAllocator<SysArena> : std::true_type { };
} // namespace folly
#include <folly/Arena-inl.h>
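A minimal sketch of how the `Arena` vendored above is typically used, via the malloc-backed `SysArena` (the buffer contents are illustrative):
```
#include <folly/Arena.h>
#include <cstring>

int main() {
  // SysArena: Arena<SysAlloc> with the default ~4KB block size.
  folly::SysArena arena;

  // Small allocations are carved out of the current block on the fast path;
  // requests larger than the block size go through allocateSlow() and get a
  // dedicated block.
  char* buf = static_cast<char*>(arena.allocate(16));
  std::memcpy(buf, "hello arena", 12);

  // deallocate() is deliberately a no-op; all memory is released when
  // `arena` itself is destroyed.
  arena.deallocate(buf);
  return 0;
}
```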

59
ios/Pods/Folly/folly/Array.h generated Normal file
View File

@ -0,0 +1,59 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/Traits.h>
#include <array>
#include <type_traits>
#include <utility>
namespace folly {
namespace array_detail {
template <typename>
struct is_ref_wrapper : std::false_type {};
template <typename T>
struct is_ref_wrapper<std::reference_wrapper<T>> : std::true_type {};
template <typename T>
using not_ref_wrapper =
folly::Negation<is_ref_wrapper<typename std::decay<T>::type>>;
template <typename D, typename...>
struct return_type_helper {
using type = D;
};
template <typename... TList>
struct return_type_helper<void, TList...> {
static_assert(
folly::Conjunction<not_ref_wrapper<TList>...>::value,
"TList cannot contain reference_wrappers when D is void");
using type = typename std::common_type<TList...>::type;
};
template <typename D, typename... TList>
using return_type = std::
array<typename return_type_helper<D, TList...>::type, sizeof...(TList)>;
} // !array_detail
template <typename D = void, typename... TList>
constexpr array_detail::return_type<D, TList...> make_array(TList&&... t) {
using value_type =
typename array_detail::return_type_helper<D, TList...>::type;
return {static_cast<value_type>(std::forward<TList>(t))...};
}
} // !folly
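A short sketch of `folly::make_array` as defined above (the values are illustrative):
```
#include <folly/Array.h>
#include <array>
#include <type_traits>

int main() {
  // Element type deduced via std::common_type: std::array<double, 3>.
  auto a = folly::make_array(1, 2.5, 3.0f);
  static_assert(std::is_same<decltype(a), std::array<double, 3>>::value,
                "deduced element type should be double");

  // Element type forced explicitly: std::array<long, 2>.
  auto b = folly::make_array<long>(1, 2);
  (void)a;
  (void)b;
  return 0;
}
```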

71
ios/Pods/Folly/folly/Assume.h generated Normal file
View File

@ -0,0 +1,71 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdlib>
#include <folly/Portability.h>
namespace folly {
namespace detail {
extern void assume_check(bool cond);
}
/**
* Inform the compiler that the argument can be assumed true. It is
* undefined behavior if the argument is not actually true, so use
* with care.
*
* Implemented as a function instead of a macro because
* __builtin_assume does not evaluate its argument at runtime, so it
* cannot be used with expressions that have side-effects.
*/
FOLLY_ALWAYS_INLINE void assume(bool cond) {
if (kIsDebug) {
detail::assume_check(cond);
} else {
#if defined(__clang__) // Must go first because Clang also defines __GNUC__.
__builtin_assume(cond);
#elif defined(__GNUC__)
if (!cond) { __builtin_unreachable(); }
#elif defined(_MSC_VER)
__assume(cond);
#else
// Do nothing.
#endif
}
}
[[noreturn]] FOLLY_ALWAYS_INLINE void assume_unreachable() {
assume(false);
// Do a bit more to get the compiler to understand
// that this function really will never return.
#if defined(__GNUC__)
__builtin_unreachable();
#elif defined(_MSC_VER)
__assume(0);
#else
// Well, it's better than nothing.
std::abort();
#endif
}
} // namespace folly
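A minimal sketch of `folly::assume` and `folly::assume_unreachable` as declared above (the `classify` function and values are illustrative):
```
#include <folly/Assume.h>

int classify(int v) {
  switch (v & 1) {
    case 0:
      return 10;
    case 1:
      return 11;
    default:
      // (v & 1) can only be 0 or 1; assume_unreachable() lets the optimizer
      // drop this branch and silences missing-return warnings.
      folly::assume_unreachable();
  }
}

int main() {
  int x = 42;
  // In debug builds assume() actually checks the condition; in release builds
  // it only informs the optimizer, so the condition must genuinely hold.
  folly::assume(x % 2 == 0);
  return classify(x) - 10;  // 0
}
```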

159
ios/Pods/Folly/folly/AtomicBitSet.h generated Normal file
View File

@ -0,0 +1,159 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <array>
#include <atomic>
#include <cassert>
#include <cstddef>
#include <limits>
#include <boost/noncopyable.hpp>
#include <folly/Portability.h>
namespace folly {
/**
* An atomic bitset of fixed size (specified at compile time).
*/
template <size_t N>
class AtomicBitSet : private boost::noncopyable {
public:
/**
* Construct an AtomicBitSet; all bits are initially false.
*/
AtomicBitSet();
/**
* Set bit idx to true, using the given memory order. Returns the
* previous value of the bit.
*
* Note that the operation is a read-modify-write operation due to the use
* of fetch_or.
*/
bool set(size_t idx, std::memory_order order = std::memory_order_seq_cst);
/**
* Set bit idx to false, using the given memory order. Returns the
* previous value of the bit.
*
* Note that the operation is a read-modify-write operation due to the use
* of fetch_and.
*/
bool reset(size_t idx, std::memory_order order = std::memory_order_seq_cst);
/**
* Set bit idx to the given value, using the given memory order. Returns
* the previous value of the bit.
*
* Note that the operation is a read-modify-write operation due to the use
* of fetch_and or fetch_or.
*
* Yes, this is an overload of set(), to keep as close to std::bitset's
* interface as possible.
*/
bool set(size_t idx,
bool value,
std::memory_order order = std::memory_order_seq_cst);
/**
* Read bit idx.
*/
bool test(size_t idx,
std::memory_order order = std::memory_order_seq_cst) const;
/**
* Same as test() with the default memory order.
*/
bool operator[](size_t idx) const;
/**
* Return the size of the bitset.
*/
constexpr size_t size() const {
return N;
}
private:
// Pick the largest lock-free type available
#if (ATOMIC_LLONG_LOCK_FREE == 2)
typedef unsigned long long BlockType;
#elif (ATOMIC_LONG_LOCK_FREE == 2)
typedef unsigned long BlockType;
#else
// Even if not lock free, what can we do?
typedef unsigned int BlockType;
#endif
typedef std::atomic<BlockType> AtomicBlockType;
static constexpr size_t kBitsPerBlock =
std::numeric_limits<BlockType>::digits;
static constexpr size_t blockIndex(size_t bit) {
return bit / kBitsPerBlock;
}
static constexpr size_t bitOffset(size_t bit) {
return bit % kBitsPerBlock;
}
// avoid casts
static constexpr BlockType kOne = 1;
std::array<AtomicBlockType, N> data_;
};
// value-initialize to zero
template <size_t N>
inline AtomicBitSet<N>::AtomicBitSet() : data_() {
}
template <size_t N>
inline bool AtomicBitSet<N>::set(size_t idx, std::memory_order order) {
assert(idx < N * kBitsPerBlock);
BlockType mask = kOne << bitOffset(idx);
return data_[blockIndex(idx)].fetch_or(mask, order) & mask;
}
template <size_t N>
inline bool AtomicBitSet<N>::reset(size_t idx, std::memory_order order) {
assert(idx < N * kBitsPerBlock);
BlockType mask = kOne << bitOffset(idx);
return data_[blockIndex(idx)].fetch_and(~mask, order) & mask;
}
template <size_t N>
inline bool AtomicBitSet<N>::set(size_t idx,
bool value,
std::memory_order order) {
return value ? set(idx, order) : reset(idx, order);
}
template <size_t N>
inline bool AtomicBitSet<N>::test(size_t idx, std::memory_order order) const {
assert(idx < N * kBitsPerBlock);
BlockType mask = kOne << bitOffset(idx);
return data_[blockIndex(idx)].load(order) & mask;
}
template <size_t N>
inline bool AtomicBitSet<N>::operator[](size_t idx) const {
return test(idx);
}
} // namespaces
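A small usage sketch for the `AtomicBitSet` above. Note that in this version the template parameter sizes the array of blocks, so `AtomicBitSet<1>` provides one block's worth of bits (typically 64):
```
#include <folly/AtomicBitSet.h>
#include <cassert>

int main() {
  folly::AtomicBitSet<1> bits;  // value-initialized: all bits start false

  assert(!bits.test(3));
  bool prev = bits.set(3);      // returns the previous value of the bit
  assert(!prev && bits[3]);

  prev = bits.reset(3);         // clear it again
  assert(prev && !bits.test(3));
  return 0;
}
```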

426
ios/Pods/Folly/folly/AtomicHashArray-inl.h generated Normal file
View File

@ -0,0 +1,426 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FOLLY_ATOMICHASHARRAY_H_
#error "This should only be included by AtomicHashArray.h"
#endif
#include <type_traits>
#include <folly/Bits.h>
#include <folly/detail/AtomicHashUtils.h>
namespace folly {
// AtomicHashArray private constructor --
template <class KeyT, class ValueT, class HashFcn, class EqualFcn,
class Allocator, class ProbeFcn, class KeyConvertFcn>
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
AtomicHashArray(size_t capacity, KeyT emptyKey, KeyT lockedKey,
KeyT erasedKey, double _maxLoadFactor, size_t cacheSize)
: capacity_(capacity),
maxEntries_(size_t(_maxLoadFactor * capacity_ + 0.5)),
kEmptyKey_(emptyKey), kLockedKey_(lockedKey), kErasedKey_(erasedKey),
kAnchorMask_(nextPowTwo(capacity_) - 1), numEntries_(0, cacheSize),
numPendingEntries_(0, cacheSize), isFull_(0), numErases_(0) {
}
/*
* findInternal --
*
* Sets ret.second to value found and ret.index to index
* of key and returns true, or if key does not exist returns false and
* ret.index is set to capacity_.
*/
template <class KeyT, class ValueT, class HashFcn, class EqualFcn,
class Allocator, class ProbeFcn, class KeyConvertFcn>
template <class LookupKeyT, class LookupHashFcn, class LookupEqualFcn>
typename AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::SimpleRetT
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
findInternal(const LookupKeyT key_in) {
checkLegalKeyIfKey<LookupKeyT>(key_in);
for (size_t idx = keyToAnchorIdx<LookupKeyT, LookupHashFcn>(key_in),
numProbes = 0;
;
idx = ProbeFcn()(idx, numProbes, capacity_)) {
const KeyT key = acquireLoadKey(cells_[idx]);
if (LIKELY(LookupEqualFcn()(key, key_in))) {
return SimpleRetT(idx, true);
}
if (UNLIKELY(key == kEmptyKey_)) {
// if we hit an empty element, this key does not exist
return SimpleRetT(capacity_, false);
}
// NOTE: the way we count numProbes must be same in find(), insert(),
// and erase(). Otherwise it may break probing.
++numProbes;
if (UNLIKELY(numProbes >= capacity_)) {
// probed every cell...fail
return SimpleRetT(capacity_, false);
}
}
}
/*
* insertInternal --
*
* Returns false on failure due to key collision or full.
* Also sets ret.index to the index of the key. If the map is full, sets
* ret.index = capacity_. Also sets ret.second to cell value, thus if insert
* successful this will be what we just inserted, if there is a key collision
* this will be the previously inserted value, and if the map is full it is
* default.
*/
template <class KeyT, class ValueT, class HashFcn, class EqualFcn,
class Allocator, class ProbeFcn, class KeyConvertFcn>
template <typename LookupKeyT,
typename LookupHashFcn,
typename LookupEqualFcn,
typename LookupKeyToKeyFcn,
typename... ArgTs>
typename AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::SimpleRetT
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
insertInternal(LookupKeyT key_in, ArgTs&&... vCtorArgs) {
const short NO_NEW_INSERTS = 1;
const short NO_PENDING_INSERTS = 2;
checkLegalKeyIfKey<LookupKeyT>(key_in);
size_t idx = keyToAnchorIdx<LookupKeyT, LookupHashFcn>(key_in);
size_t numProbes = 0;
for (;;) {
DCHECK_LT(idx, capacity_);
value_type* cell = &cells_[idx];
if (relaxedLoadKey(*cell) == kEmptyKey_) {
// NOTE: isFull_ is set based on numEntries_.readFast(), so it's
// possible to insert more than maxEntries_ entries. However, it's not
// possible to insert past capacity_.
++numPendingEntries_;
if (isFull_.load(std::memory_order_acquire)) {
--numPendingEntries_;
// Before deciding whether this insert succeeded, this thread needs to
// wait until no other thread can add a new entry.
// Correctness assumes isFull_ is true at this point. If
// another thread now does ++numPendingEntries_, we expect it
// to pass the isFull_.load() test above. (It shouldn't insert
// a new entry.)
detail::atomic_hash_spin_wait([&] {
return
(isFull_.load(std::memory_order_acquire) != NO_PENDING_INSERTS) &&
(numPendingEntries_.readFull() != 0);
});
isFull_.store(NO_PENDING_INSERTS, std::memory_order_release);
if (relaxedLoadKey(*cell) == kEmptyKey_) {
// Don't insert past max load factor
return SimpleRetT(capacity_, false);
}
} else {
// An unallocated cell. Try once to lock it. If we succeed, insert here.
// If we fail, fall through to comparison below; maybe the insert that
// just beat us was for this very key....
if (tryLockCell(cell)) {
KeyT key_new;
// Write the value - done before unlocking
try {
key_new = LookupKeyToKeyFcn()(key_in);
typedef typename std::remove_const<LookupKeyT>::type
LookupKeyTNoConst;
constexpr bool kAlreadyChecked =
std::is_same<KeyT, LookupKeyTNoConst>::value;
if (!kAlreadyChecked) {
checkLegalKeyIfKey(key_new);
}
DCHECK(relaxedLoadKey(*cell) == kLockedKey_);
// A const mapped_type is only constant once constructed, so cast
// away any const for the placement new here.
using mapped = typename std::remove_const<mapped_type>::type;
new (const_cast<mapped*>(&cell->second))
ValueT(std::forward<ArgTs>(vCtorArgs)...);
unlockCell(cell, key_new); // Sets the new key
} catch (...) {
// Transition back to empty key---requires handling
// locked->empty below.
unlockCell(cell, kEmptyKey_);
--numPendingEntries_;
throw;
}
// An erase() can race here and delete right after our insertion
// Direct comparison rather than EqualFcn ok here
// (we just inserted it)
DCHECK(relaxedLoadKey(*cell) == key_new ||
relaxedLoadKey(*cell) == kErasedKey_);
--numPendingEntries_;
++numEntries_; // This is a thread cached atomic increment :)
if (numEntries_.readFast() >= maxEntries_) {
isFull_.store(NO_NEW_INSERTS, std::memory_order_relaxed);
}
return SimpleRetT(idx, true);
}
--numPendingEntries_;
}
}
DCHECK(relaxedLoadKey(*cell) != kEmptyKey_);
if (kLockedKey_ == acquireLoadKey(*cell)) {
detail::atomic_hash_spin_wait([&] {
return kLockedKey_ == acquireLoadKey(*cell);
});
}
const KeyT thisKey = acquireLoadKey(*cell);
if (LookupEqualFcn()(thisKey, key_in)) {
// Found an existing entry for our key, but we don't overwrite the
// previous value.
return SimpleRetT(idx, false);
} else if (thisKey == kEmptyKey_ || thisKey == kLockedKey_) {
// We need to try again (i.e., don't increment numProbes or
// advance idx): this case can happen if the constructor for
// ValueT threw for this very cell (the rethrow block above).
continue;
}
// NOTE: the way we count numProbes must be same in find(),
// insert(), and erase(). Otherwise it may break probing.
++numProbes;
if (UNLIKELY(numProbes >= capacity_)) {
// probed every cell...fail
return SimpleRetT(capacity_, false);
}
idx = ProbeFcn()(idx, numProbes, capacity_);
}
}
/*
* erase --
*
* This will attempt to erase the given key key_in if the key is found. It
* returns 1 iff the key was located and marked as erased, and 0 otherwise.
*
* Memory is not freed or reclaimed by erase, i.e. the cell containing the
* erased key will never be reused. If there's an associated value, we won't
* touch it either.
*/
template <class KeyT, class ValueT, class HashFcn, class EqualFcn,
class Allocator, class ProbeFcn, class KeyConvertFcn>
size_t AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
erase(KeyT key_in) {
CHECK_NE(key_in, kEmptyKey_);
CHECK_NE(key_in, kLockedKey_);
CHECK_NE(key_in, kErasedKey_);
for (size_t idx = keyToAnchorIdx(key_in), numProbes = 0;
;
idx = ProbeFcn()(idx, numProbes, capacity_)) {
DCHECK_LT(idx, capacity_);
value_type* cell = &cells_[idx];
KeyT currentKey = acquireLoadKey(*cell);
if (currentKey == kEmptyKey_ || currentKey == kLockedKey_) {
// If we hit an empty (or locked) element, this key does not exist. This
// is similar to how it's handled in find().
return 0;
}
if (EqualFcn()(currentKey, key_in)) {
// Found an existing entry for our key, attempt to mark it erased.
// Some other thread may have erased our key, but this is ok.
KeyT expect = currentKey;
if (cellKeyPtr(*cell)->compare_exchange_strong(expect, kErasedKey_)) {
numErases_.fetch_add(1, std::memory_order_relaxed);
// Even if there's a value in the cell, we won't delete (or even
// default construct) it because some other thread may be accessing it.
// Locking it meanwhile won't work either since another thread may be
// holding a pointer to it.
// We found the key and successfully erased it.
return 1;
}
// If another thread succeeds in erasing our key, we'll stop our search.
return 0;
}
// NOTE: the way we count numProbes must be same in find(), insert(),
// and erase(). Otherwise it may break probing.
++numProbes;
if (UNLIKELY(numProbes >= capacity_)) {
// probed every cell...fail
return 0;
}
}
}
template <class KeyT, class ValueT, class HashFcn, class EqualFcn,
class Allocator, class ProbeFcn, class KeyConvertFcn>
typename AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::SmartPtr
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
create(size_t maxSize, const Config& c) {
CHECK_LE(c.maxLoadFactor, 1.0);
CHECK_GT(c.maxLoadFactor, 0.0);
CHECK_NE(c.emptyKey, c.lockedKey);
size_t capacity = size_t(maxSize / c.maxLoadFactor);
size_t sz = sizeof(AtomicHashArray) + sizeof(value_type) * capacity;
auto const mem = Allocator().allocate(sz);
try {
new (mem) AtomicHashArray(capacity, c.emptyKey, c.lockedKey, c.erasedKey,
c.maxLoadFactor, c.entryCountThreadCacheSize);
} catch (...) {
Allocator().deallocate(mem, sz);
throw;
}
SmartPtr map(static_cast<AtomicHashArray*>((void *)mem));
/*
* Mark all cells as empty.
*
* Note: we're bending the rules a little here accessing the key
* element in our cells even though the cell object has not been
* constructed, and casting them to atomic objects (see cellKeyPtr).
* (Also, in fact we never actually invoke the value_type
* constructor.) This is in order to avoid needing to default
* construct a bunch of value_type when we first start up: if you
* have an expensive default constructor for the value type this can
* noticeably speed construction time for an AHA.
*/
FOR_EACH_RANGE(i, 0, map->capacity_) {
cellKeyPtr(map->cells_[i])->store(map->kEmptyKey_,
std::memory_order_relaxed);
}
return map;
}
template <class KeyT, class ValueT, class HashFcn, class EqualFcn,
class Allocator, class ProbeFcn, class KeyConvertFcn>
void AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
destroy(AtomicHashArray* p) {
assert(p);
size_t sz = sizeof(AtomicHashArray) + sizeof(value_type) * p->capacity_;
FOR_EACH_RANGE(i, 0, p->capacity_) {
if (p->cells_[i].first != p->kEmptyKey_) {
p->cells_[i].~value_type();
}
}
p->~AtomicHashArray();
Allocator().deallocate((char *)p, sz);
}
// clear -- clears all keys and values in the map and resets all counters
template <class KeyT, class ValueT, class HashFcn, class EqualFcn,
class Allocator, class ProbeFcn, class KeyConvertFcn>
void AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
clear() {
FOR_EACH_RANGE(i, 0, capacity_) {
if (cells_[i].first != kEmptyKey_) {
cells_[i].~value_type();
*const_cast<KeyT*>(&cells_[i].first) = kEmptyKey_;
}
CHECK(cells_[i].first == kEmptyKey_);
}
numEntries_.set(0);
numPendingEntries_.set(0);
isFull_.store(0, std::memory_order_relaxed);
numErases_.store(0, std::memory_order_relaxed);
}
// Iterator implementation
template <class KeyT, class ValueT, class HashFcn, class EqualFcn,
class Allocator, class ProbeFcn, class KeyConvertFcn>
template <class ContT, class IterVal>
struct AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
aha_iterator
: boost::iterator_facade<aha_iterator<ContT,IterVal>,
IterVal,
boost::forward_traversal_tag>
{
explicit aha_iterator() : aha_(0) {}
// Conversion ctor for interoperability between const_iterator and
// iterator. The enable_if<> magic keeps us well-behaved for
// is_convertible<> (v. the iterator_facade documentation).
template<class OtherContT, class OtherVal>
aha_iterator(const aha_iterator<OtherContT,OtherVal>& o,
typename std::enable_if<
std::is_convertible<OtherVal*,IterVal*>::value >::type* = 0)
: aha_(o.aha_)
, offset_(o.offset_)
{}
explicit aha_iterator(ContT* array, size_t offset)
: aha_(array)
, offset_(offset)
{}
// Returns unique index that can be used with findAt().
// WARNING: The following function will fail silently for hashtable
// with capacity > 2^32
uint32_t getIndex() const { return offset_; }
void advancePastEmpty() {
while (offset_ < aha_->capacity_ && !isValid()) {
++offset_;
}
}
private:
friend class AtomicHashArray;
friend class boost::iterator_core_access;
void increment() {
++offset_;
advancePastEmpty();
}
bool equal(const aha_iterator& o) const {
return aha_ == o.aha_ && offset_ == o.offset_;
}
IterVal& dereference() const {
return aha_->cells_[offset_];
}
bool isValid() const {
KeyT key = acquireLoadKey(aha_->cells_[offset_]);
return key != aha_->kEmptyKey_ &&
key != aha_->kLockedKey_ &&
key != aha_->kErasedKey_;
}
private:
ContT* aha_;
size_t offset_;
}; // aha_iterator
} // namespace folly

431
ios/Pods/Folly/folly/AtomicHashArray.h generated Normal file
View File

@ -0,0 +1,431 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* AtomicHashArray is the building block for AtomicHashMap. It provides the
* core lock-free functionality, but is limited by the fact that it cannot
* grow past its initialization size and is a little more awkward (no public
* constructor, for example). If you're confident that you won't run out of
* space, don't mind the awkwardness, and really need bare-metal performance,
* feel free to use AHA directly.
*
* Check out AtomicHashMap.h for more thorough documentation on perf and
* general pros and cons relative to other hash maps.
*
* @author Spencer Ahrens <sahrens@fb.com>
* @author Jordan DeLong <delong.j@fb.com>
*/
#pragma once
#define FOLLY_ATOMICHASHARRAY_H_
#include <atomic>
#include <boost/iterator/iterator_facade.hpp>
#include <boost/noncopyable.hpp>
#include <folly/Hash.h>
#include <folly/ThreadCachedInt.h>
namespace folly {
struct AtomicHashArrayLinearProbeFcn
{
inline size_t operator()(size_t idx,
size_t /* numProbes */,
size_t capacity) const {
idx += 1; // linear probing
// Avoid modulus because it's slow
return LIKELY(idx < capacity) ? idx : (idx - capacity);
}
};
struct AtomicHashArrayQuadraticProbeFcn
{
inline size_t operator()(size_t idx, size_t numProbes, size_t capacity) const{
idx += numProbes; // quadratic probing
// Avoid modulus because it's slow
return LIKELY(idx < capacity) ? idx : (idx - capacity);
}
};
// Enables specializing checkLegalKey without specializing its class.
namespace detail {
// Local copy of folly::gen::Identity, to avoid heavy dependencies.
class AHAIdentity {
public:
template<class Value>
auto operator()(Value&& value) const ->
decltype(std::forward<Value>(value)) {
return std::forward<Value>(value);
}
};
template <typename NotKeyT, typename KeyT>
inline void checkLegalKeyIfKeyTImpl(NotKeyT /* ignored */,
KeyT /* emptyKey */,
KeyT /* lockedKey */,
KeyT /* erasedKey */) {}
template <typename KeyT>
inline void checkLegalKeyIfKeyTImpl(KeyT key_in, KeyT emptyKey,
KeyT lockedKey, KeyT erasedKey) {
DCHECK_NE(key_in, emptyKey);
DCHECK_NE(key_in, lockedKey);
DCHECK_NE(key_in, erasedKey);
}
} // namespace detail
template <class KeyT, class ValueT,
class HashFcn = std::hash<KeyT>,
class EqualFcn = std::equal_to<KeyT>,
class Allocator = std::allocator<char>,
class ProbeFcn = AtomicHashArrayLinearProbeFcn,
class KeyConvertFcn = detail::AHAIdentity>
class AtomicHashMap;
template <class KeyT, class ValueT,
class HashFcn = std::hash<KeyT>,
class EqualFcn = std::equal_to<KeyT>,
class Allocator = std::allocator<char>,
class ProbeFcn = AtomicHashArrayLinearProbeFcn,
class KeyConvertFcn = detail::AHAIdentity>
class AtomicHashArray : boost::noncopyable {
static_assert((std::is_convertible<KeyT,int32_t>::value ||
std::is_convertible<KeyT,int64_t>::value ||
std::is_convertible<KeyT,const void*>::value),
"You are trying to use AtomicHashArray with disallowed key "
"types. You must use atomically compare-and-swappable integer "
"keys, or a different container class.");
public:
typedef KeyT key_type;
typedef ValueT mapped_type;
typedef HashFcn hasher;
typedef EqualFcn key_equal;
typedef KeyConvertFcn key_convert;
typedef std::pair<const KeyT, ValueT> value_type;
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
typedef value_type& reference;
typedef const value_type& const_reference;
typedef value_type* pointer;
typedef const value_type* const_pointer;
const size_t capacity_;
const size_t maxEntries_;
const KeyT kEmptyKey_;
const KeyT kLockedKey_;
const KeyT kErasedKey_;
template<class ContT, class IterVal>
struct aha_iterator;
typedef aha_iterator<const AtomicHashArray,const value_type> const_iterator;
typedef aha_iterator<AtomicHashArray,value_type> iterator;
// You really shouldn't need this if you use the SmartPtr provided by create,
// but if you really want to do something crazy like stick the released
// pointer into a DiscriminatedPtr or something, you'll need this to clean up
// after yourself.
static void destroy(AtomicHashArray*);
private:
const size_t kAnchorMask_;
struct Deleter {
void operator()(AtomicHashArray* ptr) {
AtomicHashArray::destroy(ptr);
}
};
public:
typedef std::unique_ptr<AtomicHashArray, Deleter> SmartPtr;
/*
* create --
*
* Creates AtomicHashArray objects. Use instead of constructor/destructor.
*
* We do things this way in order to avoid the perf penalty of a second
* pointer indirection when composing these into AtomicHashMap, which needs
* to store an array of pointers so that it can perform atomic operations on
* them when growing.
*
* Instead of a mess of arguments, we take a max size and a Config struct to
* simulate named ctor parameters. The Config struct has sensible defaults
* for everything, but is overloaded - if you specify a positive capacity,
* that will be used directly instead of computing it based on
* maxLoadFactor.
*
* Create returns an AHA::SmartPtr which is a unique_ptr with a custom
* deleter to make sure everything is cleaned up properly.
*/
struct Config {
KeyT emptyKey;
KeyT lockedKey;
KeyT erasedKey;
double maxLoadFactor;
double growthFactor;
int entryCountThreadCacheSize;
size_t capacity; // if positive, overrides maxLoadFactor
public:
// Cannot have constexpr ctor because some compilers rightly complain.
Config() : emptyKey((KeyT)-1),
lockedKey((KeyT)-2),
erasedKey((KeyT)-3),
maxLoadFactor(0.8),
growthFactor(-1),
entryCountThreadCacheSize(1000),
capacity(0) {}
};
// Cannot have pre-instantiated const Config instance because of SIOF.
static SmartPtr create(size_t maxSize, const Config& c = Config());
/*
* find --
*
*
* Returns the iterator to the element if found, otherwise end().
*
* As an optional feature, the type of the key to look up (LookupKeyT) is
* allowed to be different from the type of keys actually stored (KeyT).
*
* This enables use cases where materializing the key is costly and usually
* redundant, e.g., canonicalizing/interning a set of strings and being able
* to look up by StringPiece. To use this feature, LookupHashFcn must take
* a LookupKeyT, and LookupEqualFcn must take KeyT and LookupKeyT as first
* and second parameter, respectively.
*
* See folly/test/AtomicHashArrayTest.cpp for sample usage.
*/
template <typename LookupKeyT = key_type,
typename LookupHashFcn = hasher,
typename LookupEqualFcn = key_equal>
iterator find(LookupKeyT k) {
return iterator(this,
findInternal<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k).idx);
}
template <typename LookupKeyT = key_type,
typename LookupHashFcn = hasher,
typename LookupEqualFcn = key_equal>
const_iterator find(LookupKeyT k) const {
return const_cast<AtomicHashArray*>(this)->
find<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k);
}
/*
* insert --
*
* Returns a pair with iterator to the element at r.first and bool success.
* Retrieve the index with ret.first.getIndex().
*
* Fails on key collision (does not overwrite) or if map becomes
* full, at which point no element is inserted, iterator is set to end(),
* and success is set false. On collisions, success is set false, but the
* iterator is set to the existing entry.
*/
std::pair<iterator,bool> insert(const value_type& r) {
return emplace(r.first, r.second);
}
std::pair<iterator,bool> insert(value_type&& r) {
return emplace(r.first, std::move(r.second));
}
/*
* emplace --
*
* Same contract as insert(), but performs in-place construction
* of the value type using the specified arguments.
*
* Also, like find(), this method optionally allows 'key_in' to have a type
* different from that stored in the table; see find(). If and only if no
* equal key is already present, this method converts 'key_in' to a key of
* type KeyT using the provided LookupKeyToKeyFcn.
*/
template <typename LookupKeyT = key_type,
typename LookupHashFcn = hasher,
typename LookupEqualFcn = key_equal,
typename LookupKeyToKeyFcn = key_convert,
typename... ArgTs>
std::pair<iterator,bool> emplace(LookupKeyT key_in, ArgTs&&... vCtorArgs) {
SimpleRetT ret = insertInternal<LookupKeyT,
LookupHashFcn,
LookupEqualFcn,
LookupKeyToKeyFcn>(
key_in,
std::forward<ArgTs>(vCtorArgs)...);
return std::make_pair(iterator(this, ret.idx), ret.success);
}
// returns the number of elements erased - should never exceed 1
size_t erase(KeyT k);
// clears all keys and values in the map and resets all counters. Not thread
// safe.
void clear();
// Exact number of elements in the map - note that readFull() acquires a
// mutex. See folly/ThreadCachedInt.h for more details.
size_t size() const {
return numEntries_.readFull() -
numErases_.load(std::memory_order_relaxed);
}
bool empty() const { return size() == 0; }
iterator begin() {
iterator it(this, 0);
it.advancePastEmpty();
return it;
}
const_iterator begin() const {
const_iterator it(this, 0);
it.advancePastEmpty();
return it;
}
iterator end() { return iterator(this, capacity_); }
const_iterator end() const { return const_iterator(this, capacity_); }
// See AtomicHashMap::findAt - access elements directly
// WARNING: The following 2 functions will fail silently for hashtable
// with capacity > 2^32
iterator findAt(uint32_t idx) {
DCHECK_LT(idx, capacity_);
return iterator(this, idx);
}
const_iterator findAt(uint32_t idx) const {
return const_cast<AtomicHashArray*>(this)->findAt(idx);
}
iterator makeIter(size_t idx) { return iterator(this, idx); }
const_iterator makeIter(size_t idx) const {
return const_iterator(this, idx);
}
// The max load factor allowed for this map
double maxLoadFactor() const { return ((double) maxEntries_) / capacity_; }
void setEntryCountThreadCacheSize(uint32_t newSize) {
numEntries_.setCacheSize(newSize);
numPendingEntries_.setCacheSize(newSize);
}
int getEntryCountThreadCacheSize() const {
return numEntries_.getCacheSize();
}
/* Private data and helper functions... */
private:
friend class AtomicHashMap<KeyT,
ValueT,
HashFcn,
EqualFcn,
Allocator,
ProbeFcn>;
struct SimpleRetT { size_t idx; bool success;
SimpleRetT(size_t i, bool s) : idx(i), success(s) {}
SimpleRetT() = default;
};
template <typename LookupKeyT = key_type,
typename LookupHashFcn = hasher,
typename LookupEqualFcn = key_equal,
typename LookupKeyToKeyFcn = detail::AHAIdentity,
typename... ArgTs>
SimpleRetT insertInternal(LookupKeyT key, ArgTs&&... vCtorArgs);
template <typename LookupKeyT = key_type,
typename LookupHashFcn = hasher,
typename LookupEqualFcn = key_equal>
SimpleRetT findInternal(const LookupKeyT key);
template <typename MaybeKeyT>
void checkLegalKeyIfKey(MaybeKeyT key) {
detail::checkLegalKeyIfKeyTImpl(key, kEmptyKey_, kLockedKey_, kErasedKey_);
}
static std::atomic<KeyT>* cellKeyPtr(const value_type& r) {
// We need some illegal casting here in order to actually store
// our value_type as a std::pair<const,>. But a little bit of
// undefined behavior never hurt anyone ...
static_assert(sizeof(std::atomic<KeyT>) == sizeof(KeyT),
"std::atomic is implemented in an unexpected way for AHM");
return
const_cast<std::atomic<KeyT>*>(
reinterpret_cast<std::atomic<KeyT> const*>(&r.first));
}
static KeyT relaxedLoadKey(const value_type& r) {
return cellKeyPtr(r)->load(std::memory_order_relaxed);
}
static KeyT acquireLoadKey(const value_type& r) {
return cellKeyPtr(r)->load(std::memory_order_acquire);
}
// Fun with thread local storage - atomic increment is expensive
// (relatively), so we accumulate in the thread cache and periodically
// flush to the actual variable, and walk through the unflushed counts when
// reading the value, so be careful of calling size() too frequently. This
// increases insertion throughput several times over while keeping the count
// accurate.
ThreadCachedInt<uint64_t> numEntries_; // Successful key inserts
ThreadCachedInt<uint64_t> numPendingEntries_; // Used by insertInternal
std::atomic<int64_t> isFull_; // Used by insertInternal
std::atomic<int64_t> numErases_; // Successful key erases
value_type cells_[0]; // This must be the last field of this class
// Force constructor/destructor private since create/destroy should be
// used externally instead
AtomicHashArray(size_t capacity, KeyT emptyKey, KeyT lockedKey,
KeyT erasedKey, double maxLoadFactor, size_t cacheSize);
~AtomicHashArray() = default;
inline void unlockCell(value_type* const cell, KeyT newKey) {
cellKeyPtr(*cell)->store(newKey, std::memory_order_release);
}
inline bool tryLockCell(value_type* const cell) {
KeyT expect = kEmptyKey_;
return cellKeyPtr(*cell)->compare_exchange_strong(expect, kLockedKey_,
std::memory_order_acq_rel);
}
template <class LookupKeyT = key_type, class LookupHashFcn = hasher>
inline size_t keyToAnchorIdx(const LookupKeyT k) const {
const size_t hashVal = LookupHashFcn()(k);
const size_t probe = hashVal & kAnchorMask_;
return LIKELY(probe < capacity_) ? probe : hashVal % capacity_;
}
}; // AtomicHashArray
} // namespace folly
#include <folly/AtomicHashArray-inl.h>
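The thread-cached counting strategy described above numEntries_ (batch increments per thread, flush to the shared atomic periodically) can be illustrated with a small stand-alone sketch. The class name and flush threshold below are made up for illustration; folly::ThreadCachedInt is the real implementation.

#include <atomic>
#include <cstdint>

// Minimal sketch of a thread-cached counter: each thread batches increments
// locally and only touches the shared atomic once per kFlushEvery updates.
// Reading only the shared atomic is cheap but may lag behind, which is why
// the real readFull()/size() has to walk the per-thread caches.
class CachedCounter {
 public:
  void increment() {
    // One cache per thread per function for simplicity; folly keeps a
    // per-object cache instead.
    thread_local uint64_t pending = 0;
    if (++pending >= kFlushEvery) {
      total_.fetch_add(pending, std::memory_order_relaxed);
      pending = 0;
    }
  }
  uint64_t readFast() const {  // may under-count by unflushed increments
    return total_.load(std::memory_order_relaxed);
  }
 private:
  static constexpr uint64_t kFlushEvery = 1000;
  std::atomic<uint64_t> total_{0};
};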

533
ios/Pods/Folly/folly/AtomicHashMap-inl.h generated Normal file

@ -0,0 +1,533 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FOLLY_ATOMICHASHMAP_H_
#error "This should only be included by AtomicHashMap.h"
#endif
#include <folly/detail/AtomicHashUtils.h>
namespace folly {
// AtomicHashMap constructor -- Atomic wrapper that allows growth
// This class has a lot of overhead (184 Bytes) so only use for big maps
template <typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
AtomicHashMap(size_t finalSizeEst, const Config& config)
: kGrowthFrac_(config.growthFactor < 0 ?
1.0 - config.maxLoadFactor : config.growthFactor) {
CHECK(config.maxLoadFactor > 0.0 && config.maxLoadFactor < 1.0);
subMaps_[0].store(SubMap::create(finalSizeEst, config).release(),
std::memory_order_relaxed);
auto subMapCount = kNumSubMaps_;
FOR_EACH_RANGE(i, 1, subMapCount) {
subMaps_[i].store(nullptr, std::memory_order_relaxed);
}
numMapsAllocated_.store(1, std::memory_order_relaxed);
}
// emplace --
template <typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
template <typename LookupKeyT,
typename LookupHashFcn,
typename LookupEqualFcn,
typename LookupKeyToKeyFcn,
typename... ArgTs>
std::pair<typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator,
ProbeFcn, KeyConvertFcn>::iterator, bool>
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
emplace(LookupKeyT k, ArgTs&&... vCtorArgs) {
SimpleRetT ret = insertInternal<LookupKeyT,
LookupHashFcn,
LookupEqualFcn,
LookupKeyToKeyFcn>(
k, std::forward<ArgTs>(vCtorArgs)...);
SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
return std::make_pair(iterator(this, ret.i, subMap->makeIter(ret.j)),
ret.success);
}
// insertInternal -- Allocates new sub maps as existing ones fill up.
template <typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
template <typename LookupKeyT,
typename LookupHashFcn,
typename LookupEqualFcn,
typename LookupKeyToKeyFcn,
typename... ArgTs>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
SimpleRetT
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
insertInternal(LookupKeyT key, ArgTs&&... vCtorArgs) {
beginInsertInternal:
auto nextMapIdx = // this maintains our state
numMapsAllocated_.load(std::memory_order_acquire);
typename SubMap::SimpleRetT ret;
FOR_EACH_RANGE(i, 0, nextMapIdx) {
// insert in each map successively. If one succeeds, we're done!
SubMap* subMap = subMaps_[i].load(std::memory_order_relaxed);
ret = subMap->template insertInternal<LookupKeyT,
LookupHashFcn,
LookupEqualFcn,
LookupKeyToKeyFcn>(
key, std::forward<ArgTs>(vCtorArgs)...);
if (ret.idx == subMap->capacity_) {
continue; //map is full, so try the next one
}
// Either collision or success - insert in either case
return SimpleRetT(i, ret.idx, ret.success);
}
// If we made it this far, all maps are full and we need to try to allocate
// the next one.
SubMap* primarySubMap = subMaps_[0].load(std::memory_order_relaxed);
if (nextMapIdx >= kNumSubMaps_ ||
primarySubMap->capacity_ * kGrowthFrac_ < 1.0) {
// Can't allocate any more sub maps.
throw AtomicHashMapFullError();
}
if (tryLockMap(nextMapIdx)) {
// Alloc a new map and shove it in. We can change whatever
// we want because other threads are waiting on us...
size_t numCellsAllocated = (size_t)
(primarySubMap->capacity_ *
std::pow(1.0 + kGrowthFrac_, nextMapIdx - 1));
size_t newSize = (int) (numCellsAllocated * kGrowthFrac_);
DCHECK(subMaps_[nextMapIdx].load(std::memory_order_relaxed) ==
(SubMap*)kLockedPtr_);
// create a new map using the settings stored in the first map
Config config;
config.emptyKey = primarySubMap->kEmptyKey_;
config.lockedKey = primarySubMap->kLockedKey_;
config.erasedKey = primarySubMap->kErasedKey_;
config.maxLoadFactor = primarySubMap->maxLoadFactor();
config.entryCountThreadCacheSize =
primarySubMap->getEntryCountThreadCacheSize();
subMaps_[nextMapIdx].store(SubMap::create(newSize, config).release(),
std::memory_order_relaxed);
// Publish the map to other threads.
numMapsAllocated_.fetch_add(1, std::memory_order_release);
DCHECK_EQ(nextMapIdx + 1,
numMapsAllocated_.load(std::memory_order_relaxed));
} else {
// If we lost the race, we'll have to wait for the next map to get
// allocated before doing any insertion here.
detail::atomic_hash_spin_wait([&] {
return nextMapIdx >= numMapsAllocated_.load(std::memory_order_acquire);
});
}
// Relaxed is ok here because either we just created this map, or we
// just did a spin wait with an acquire load on numMapsAllocated_.
SubMap* loadedMap = subMaps_[nextMapIdx].load(std::memory_order_relaxed);
DCHECK(loadedMap && loadedMap != (SubMap*)kLockedPtr_);
ret = loadedMap->insertInternal(key, std::forward<ArgTs>(vCtorArgs)...);
if (ret.idx != loadedMap->capacity_) {
return SimpleRetT(nextMapIdx, ret.idx, ret.success);
}
// We took way too long and the new map is already full...try again from
// the top (this should pretty much never happen).
goto beginInsertInternal;
}
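// Worked example of the sizing above (numbers assumed for illustration):
// with kGrowthFrac_ = 0.5 and a primary submap of capacity 1024,
//   nextMapIdx == 1: numCellsAllocated = 1024 * 1.5^0 = 1024, newSize = 512
//   nextMapIdx == 2: numCellsAllocated = 1024 * 1.5^1 = 1536, newSize = 768
// so each additional submap is sized from the primary capacity scaled by a
// geometric factor, and total growth is capped at kNumSubMaps_ submaps.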
// find --
template <typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
template <class LookupKeyT, class LookupHashFcn, class LookupEqualFcn>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
iterator
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::find(
LookupKeyT k) {
SimpleRetT ret = findInternal<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k);
if (!ret.success) {
return end();
}
SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
return iterator(this, ret.i, subMap->makeIter(ret.j));
}
template <typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
template <class LookupKeyT, class LookupHashFcn, class LookupEqualFcn>
typename AtomicHashMap<KeyT, ValueT,
HashFcn, EqualFcn, Allocator, ProbeFcn, KeyConvertFcn>::const_iterator
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
find(LookupKeyT k) const {
return const_cast<AtomicHashMap*>(this)->find<LookupKeyT,
LookupHashFcn,
LookupEqualFcn>(k);
}
// findInternal --
template <typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
template <class LookupKeyT, class LookupHashFcn, class LookupEqualFcn>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
SimpleRetT
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
findInternal(const LookupKeyT k) const {
SubMap* const primaryMap = subMaps_[0].load(std::memory_order_relaxed);
typename SubMap::SimpleRetT ret =
primaryMap->template findInternal<LookupKeyT,
LookupHashFcn,
LookupEqualFcn>(k);
if (LIKELY(ret.idx != primaryMap->capacity_)) {
return SimpleRetT(0, ret.idx, ret.success);
}
int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
FOR_EACH_RANGE(i, 1, numMaps) {
// Check each map successively. If one succeeds, we're done!
SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
ret = thisMap->template findInternal<LookupKeyT,
LookupHashFcn,
LookupEqualFcn>(k);
if (LIKELY(ret.idx != thisMap->capacity_)) {
return SimpleRetT(i, ret.idx, ret.success);
}
}
// Didn't find our key...
return SimpleRetT(numMaps, 0, false);
}
// findAtInternal -- see encodeIndex() for details.
template <typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
SimpleRetT
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
findAtInternal(uint32_t idx) const {
uint32_t subMapIdx, subMapOffset;
if (idx & kSecondaryMapBit_) {
// idx falls in a secondary map
idx &= ~kSecondaryMapBit_; // unset secondary bit
subMapIdx = idx >> kSubMapIndexShift_;
DCHECK_LT(subMapIdx, numMapsAllocated_.load(std::memory_order_relaxed));
subMapOffset = idx & kSubMapIndexMask_;
} else {
// idx falls in primary map
subMapIdx = 0;
subMapOffset = idx;
}
return SimpleRetT(subMapIdx, subMapOffset, true);
}
// erase --
template <typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
size_type
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
erase(const KeyT k) {
int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
FOR_EACH_RANGE(i, 0, numMaps) {
// Check each map successively. If one succeeds, we're done!
if (subMaps_[i].load(std::memory_order_relaxed)->erase(k)) {
return 1;
}
}
// Didn't find our key...
return 0;
}
// capacity -- summation of capacities of all submaps
template <typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
capacity() const {
size_t totalCap(0);
int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
FOR_EACH_RANGE(i, 0, numMaps) {
totalCap += subMaps_[i].load(std::memory_order_relaxed)->capacity_;
}
return totalCap;
}
// spaceRemaining --
// number of new insertions until current submaps are all at max load
template <typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
spaceRemaining() const {
size_t spaceRem(0);
int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
FOR_EACH_RANGE(i, 0, numMaps) {
SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
spaceRem += std::max(
0,
thisMap->maxEntries_ - thisMap->numEntries_.readFull()
);
}
return spaceRem;
}
// clear -- Wipes all keys and values from primary map and destroys
// all secondary maps. Not thread safe.
template <typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
void AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
clear() {
subMaps_[0].load(std::memory_order_relaxed)->clear();
int const numMaps = numMapsAllocated_
.load(std::memory_order_relaxed);
FOR_EACH_RANGE(i, 1, numMaps) {
SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
DCHECK(thisMap);
SubMap::destroy(thisMap);
subMaps_[i].store(nullptr, std::memory_order_relaxed);
}
numMapsAllocated_.store(1, std::memory_order_relaxed);
}
// size --
template <typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
size() const {
size_t totalSize(0);
int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
FOR_EACH_RANGE(i, 0, numMaps) {
totalSize += subMaps_[i].load(std::memory_order_relaxed)->size();
}
return totalSize;
}
// encodeIndex -- Encode the submap index and offset into return.
// index_ret must be pre-populated with the submap offset.
//
// We leave index_ret untouched when referring to the primary map
// so it can be as large as possible (31 data bits). Max size of
// secondary maps is limited by what can fit in the low 27 bits.
//
// Returns the following bit-encoded data in index_ret:
// if subMap == 0 (primary map) =>
// bit(s) value
// 31 0
// 0-30 submap offset (index_ret input)
//
// if subMap > 0 (secondary maps) =>
// bit(s) value
// 31 1
// 27-30 which subMap
// 0-26 subMap offset (index_ret input)
template <typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
inline uint32_t
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
encodeIndex(uint32_t subMap, uint32_t offset) {
DCHECK_EQ(offset & kSecondaryMapBit_, 0); // offset can't be too big
if (subMap == 0) return offset;
// Make sure subMap isn't too big
DCHECK_EQ(subMap >> kNumSubMapBits_, 0);
// Make sure subMap bits of offset are clear
DCHECK_EQ(offset & (~kSubMapIndexMask_ | kSecondaryMapBit_), 0);
// Set high-order bits to encode which submap this index belongs to
return offset | (subMap << kSubMapIndexShift_) | kSecondaryMapBit_;
}
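// Worked example of the encoding above (values for illustration only):
// with kNumSubMapBits_ = 4, kSubMapIndexShift_ = 27 and kSecondaryMapBit_ =
// 0x80000000, encodeIndex(2, 5) returns 5 | (2 << 27) | 0x80000000 =
// 0x90000005, and findAtInternal() decodes that back to submap 2, offset 5.
// encodeIndex(0, 5) is just 5, leaving the full 31 bits for the primary map.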
// Iterator implementation
template <typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
template <class ContT, class IterVal, class SubIt>
struct AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
ahm_iterator : boost::iterator_facade<ahm_iterator<ContT, IterVal, SubIt>,
IterVal,
boost::forward_traversal_tag> {
explicit ahm_iterator() : ahm_(0) {}
// Conversion ctor for interoperability between const_iterator and
// iterator. The enable_if<> magic keeps us well-behaved for
// is_convertible<> (v. the iterator_facade documentation).
template<class OtherContT, class OtherVal, class OtherSubIt>
ahm_iterator(const ahm_iterator<OtherContT,OtherVal,OtherSubIt>& o,
typename std::enable_if<
std::is_convertible<OtherSubIt,SubIt>::value >::type* = 0)
: ahm_(o.ahm_)
, subMap_(o.subMap_)
, subIt_(o.subIt_)
{}
/*
* Returns the unique index that can be used for access directly
* into the data storage.
*/
uint32_t getIndex() const {
CHECK(!isEnd());
return ahm_->encodeIndex(subMap_, subIt_.getIndex());
}
private:
friend class AtomicHashMap;
explicit ahm_iterator(ContT* ahm,
uint32_t subMap,
const SubIt& subIt)
: ahm_(ahm)
, subMap_(subMap)
, subIt_(subIt)
{}
friend class boost::iterator_core_access;
void increment() {
CHECK(!isEnd());
++subIt_;
checkAdvanceToNextSubmap();
}
bool equal(const ahm_iterator& other) const {
if (ahm_ != other.ahm_) {
return false;
}
if (isEnd() || other.isEnd()) {
return isEnd() == other.isEnd();
}
return subMap_ == other.subMap_ &&
subIt_ == other.subIt_;
}
IterVal& dereference() const {
return *subIt_;
}
bool isEnd() const { return ahm_ == nullptr; }
void checkAdvanceToNextSubmap() {
if (isEnd()) {
return;
}
SubMap* thisMap = ahm_->subMaps_[subMap_].
load(std::memory_order_relaxed);
while (subIt_ == thisMap->end()) {
// This sub iterator is done, advance to next one
if (subMap_ + 1 <
ahm_->numMapsAllocated_.load(std::memory_order_acquire)) {
++subMap_;
thisMap = ahm_->subMaps_[subMap_].load(std::memory_order_relaxed);
subIt_ = thisMap->begin();
} else {
ahm_ = nullptr;
return;
}
}
}
private:
ContT* ahm_;
uint32_t subMap_;
SubIt subIt_;
}; // ahm_iterator
} // namespace folly

474
ios/Pods/Folly/folly/AtomicHashMap.h generated Normal file

@ -0,0 +1,474 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* AtomicHashMap --
*
* A high-performance concurrent hash map with int32 or int64 keys. Supports
* insert, find(key), findAt(index), erase(key), size, and more. Memory cannot
* be freed or reclaimed by erase. Can grow to a maximum of about 18 times the
* initial capacity, but performance degrades linearly with growth. Can also be
* used as an object store with unique 32-bit references directly into the
* internal storage (retrieved with iterator::getIndex()).
*
* Advantages:
* - High-performance (~2-4x tbb::concurrent_hash_map in heavily
* multi-threaded environments).
* - Efficient memory usage if initial capacity is not over estimated
* (especially for small keys and values).
* - Good fragmentation properties (only allocates in large slabs which can
* be reused with clear() and never move).
* - Can generate unique, long-lived 32-bit references for efficient lookup
* (see findAt()).
*
* Disadvantages:
* - Keys must be native int32 or int64, or explicitly converted.
* - Must be able to specify unique empty, locked, and erased keys
* - Performance degrades linearly as size grows beyond initialization
* capacity.
* - Max size limit of ~18x initial size (dependent on max load factor).
* - Memory is not freed or reclaimed by erase.
*
* Usage and Operation Details:
* Simple performance/memory tradeoff with maxLoadFactor. Higher load factors
* give better memory utilization but probe lengths increase, reducing
* performance.
*
* Implementation and Performance Details:
* AHArray is a fixed size contiguous block of value_type cells. When
* writing a cell, the key is locked while the rest of the record is
* written. Once done, the cell is unlocked by setting the key. find()
* is completely wait-free and doesn't require any non-relaxed atomic
* operations. AHA cannot grow beyond initialization capacity, but is
* faster because of reduced data indirection.
*
* AHMap is a wrapper around AHArray sub-maps that allows growth and provides
* an interface closer to the STL UnorderedAssociativeContainer concept. These
* sub-maps are allocated on the fly and are processed in series, so the more
* there are (from growing past initial capacity), the worse the performance.
*
* Insert returns false if there is a key collision and throws if the max size
* of the map is exceeded.
*
* Benchmark performance with 8 simultaneous threads processing 1 million
* unique <int64, int64> entries on a 4-core, 2.5 GHz machine:
*
* Load Factor Mem Efficiency usec/Insert usec/Find
* 50% 50% 0.19 0.05
* 85% 85% 0.20 0.06
* 90% 90% 0.23 0.08
* 95% 95% 0.27 0.10
*
* See folly/tests/AtomicHashMapTest.cpp for more benchmarks.
*
* @author Spencer Ahrens <sahrens@fb.com>
* @author Jordan DeLong <delong.j@fb.com>
*
*/
#pragma once
#define FOLLY_ATOMICHASHMAP_H_
#include <boost/iterator/iterator_facade.hpp>
#include <boost/noncopyable.hpp>
#include <boost/type_traits/is_convertible.hpp>
#include <stdexcept>
#include <functional>
#include <atomic>
#include <folly/AtomicHashArray.h>
#include <folly/Foreach.h>
#include <folly/Hash.h>
#include <folly/Likely.h>
#include <folly/ThreadCachedInt.h>
namespace folly {
/*
* AtomicHashMap provides an interface somewhat similar to the
* UnorderedAssociativeContainer concept in C++. This does not
* exactly match this concept (or even the basic Container concept),
* because of some restrictions imposed by our datastructure.
*
* Specific differences (there are quite a few):
*
* - Efficiently thread safe for inserts (main point of this stuff),
* wait-free for lookups.
*
* - You can erase from this container, but the cell containing the key will
* not be freed or reclaimed.
*
* - You can erase everything by calling clear() (and you must guarantee only
* one thread can be using the container to do that).
*
* - We aren't DefaultConstructible, CopyConstructible, Assignable, or
* EqualityComparable. (Most of these are probably not something
* you actually want to do with this anyway.)
*
* - We don't support the various bucket functions, rehash(),
* reserve(), or equal_range(). Also no constructors taking
* iterators, although this could change.
*
* - Several insertion functions, notably operator[], are not
* implemented. It is a little too easy to misuse these functions
* with this container, where part of the point is that when an
* insertion happens for a new key, it will atomically have the
* desired value.
*
* - The map has no templated insert() taking an iterator range, but
* we do provide an insert(key, value). The latter seems more
* frequently useful for this container (to avoid sprinkling
* make_pair everywhere), and providing both can lead to some gross
* template error messages.
*
* - The Allocator must not be stateful (a new instance will be spun up for
* each allocation), and its allocate() method must take a raw number of
* bytes.
*
* - KeyT must be a 32 bit or 64 bit atomic integer type, and you must
* define special 'locked' and 'empty' key values in the ctor
*
* - We don't take the Hash function object as an instance in the
* constructor.
*
*/
// Thrown when insertion fails due to running out of space for
// submaps.
struct AtomicHashMapFullError : std::runtime_error {
explicit AtomicHashMapFullError()
: std::runtime_error("AtomicHashMap is full")
{}
};
template<class KeyT, class ValueT, class HashFcn, class EqualFcn,
class Allocator, class ProbeFcn, class KeyConvertFcn>
class AtomicHashMap : boost::noncopyable {
typedef AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>
SubMap;
public:
typedef KeyT key_type;
typedef ValueT mapped_type;
typedef std::pair<const KeyT, ValueT> value_type;
typedef HashFcn hasher;
typedef EqualFcn key_equal;
typedef KeyConvertFcn key_convert;
typedef value_type* pointer;
typedef value_type& reference;
typedef const value_type& const_reference;
typedef std::ptrdiff_t difference_type;
typedef std::size_t size_type;
typedef typename SubMap::Config Config;
template<class ContT, class IterVal, class SubIt>
struct ahm_iterator;
typedef ahm_iterator<const AtomicHashMap,
const value_type,
typename SubMap::const_iterator>
const_iterator;
typedef ahm_iterator<AtomicHashMap,
value_type,
typename SubMap::iterator>
iterator;
public:
const float kGrowthFrac_; // How much to grow when we run out of capacity.
// The constructor takes a finalSizeEst which is the optimal
// number of elements to maximize space utilization and performance,
// and a Config object to specify more advanced options.
explicit AtomicHashMap(size_t finalSizeEst, const Config& c = Config());
~AtomicHashMap() {
const int numMaps = numMapsAllocated_.load(std::memory_order_relaxed);
FOR_EACH_RANGE (i, 0, numMaps) {
SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
DCHECK(thisMap);
SubMap::destroy(thisMap);
}
}
key_equal key_eq() const { return key_equal(); }
hasher hash_function() const { return hasher(); }
/*
* insert --
*
* Returns a pair with iterator to the element at r.first and
* success. Retrieve the index with ret.first.getIndex().
*
* Does not overwrite on key collision, but returns an iterator to
* the existing element (since this could be due to a race with
* another thread, it is often important to check this return
* value).
*
* Allocates new sub maps as the existing ones become full. If
* all sub maps are full, no element is inserted, and
* AtomicHashMapFullError is thrown.
*/
std::pair<iterator,bool> insert(const value_type& r) {
return emplace(r.first, r.second);
}
std::pair<iterator,bool> insert(key_type k, const mapped_type& v) {
return emplace(k, v);
}
std::pair<iterator,bool> insert(value_type&& r) {
return emplace(r.first, std::move(r.second));
}
std::pair<iterator,bool> insert(key_type k, mapped_type&& v) {
return emplace(k, std::move(v));
}
/*
* emplace --
*
* Same contract as insert(), but performs in-place construction
* of the value type using the specified arguments.
*
* Also, like find(), this method optionally allows 'key_in' to have a type
* different from that stored in the table; see find(). If and only if no
* equal key is already present, this method converts 'key_in' to a key of
* type KeyT using the provided LookupKeyToKeyFcn.
*/
template <typename LookupKeyT = key_type,
typename LookupHashFcn = hasher,
typename LookupEqualFcn = key_equal,
typename LookupKeyToKeyFcn = key_convert,
typename... ArgTs>
std::pair<iterator,bool> emplace(LookupKeyT k, ArgTs&&... vCtorArg);
/*
* find --
*
* Returns the iterator to the element if found, otherwise end().
*
* As an optional feature, the type of the key to look up (LookupKeyT) is
* allowed to be different from the type of keys actually stored (KeyT).
*
* This enables use cases where materializing the key is costly and usually
* redundant, e.g., canonicalizing/interning a set of strings and being able
* to look up by StringPiece. To use this feature, LookupHashFcn must take
* a LookupKeyT, and LookupEqualFcn must take KeyT and LookupKeyT as first
* and second parameter, respectively.
*
* See folly/test/ArrayHashMapTest.cpp for sample usage.
*/
template <typename LookupKeyT = key_type,
typename LookupHashFcn = hasher,
typename LookupEqualFcn = key_equal>
iterator find(LookupKeyT k);
template <typename LookupKeyT = key_type,
typename LookupHashFcn = hasher,
typename LookupEqualFcn = key_equal>
const_iterator find(LookupKeyT k) const;
/*
* erase --
*
* Erases key k from the map
*
* Returns 1 iff the key is found and erased, and 0 otherwise.
*/
size_type erase(key_type k);
/*
* clear --
*
* Wipes all keys and values from primary map and destroys all secondary
* maps. Primary map remains allocated and thus the memory can be reused
* in place. Not thread safe.
*
*/
void clear();
/*
* size --
*
* Returns the exact size of the map. Note this is not as cheap as typical
* size() implementations because, for each AtomicHashArray in this AHM, we
* need to grab a lock and accumulate the values from all the thread local
* counters. See folly/ThreadCachedInt.h for more details.
*/
size_t size() const;
bool empty() const { return size() == 0; }
size_type count(key_type k) const {
return find(k) == end() ? 0 : 1;
}
/*
* findAt --
*
* Returns an iterator into the map.
*
* idx should only be an unmodified value returned by calling getIndex() on
* a valid iterator returned by find() or insert(). If idx is invalid you
* have a bug and the process aborts.
*/
iterator findAt(uint32_t idx) {
SimpleRetT ret = findAtInternal(idx);
DCHECK_LT(ret.i, numSubMaps());
return iterator(this, ret.i,
subMaps_[ret.i].load(std::memory_order_relaxed)->makeIter(ret.j));
}
const_iterator findAt(uint32_t idx) const {
return const_cast<AtomicHashMap*>(this)->findAt(idx);
}
// Total capacity - summation of capacities of all submaps.
size_t capacity() const;
// Number of new insertions until current submaps are all at max load factor.
size_t spaceRemaining() const;
void setEntryCountThreadCacheSize(int32_t newSize) {
const int numMaps = numMapsAllocated_.load(std::memory_order_acquire);
for (int i = 0; i < numMaps; ++i) {
SubMap* map = subMaps_[i].load(std::memory_order_relaxed);
map->setEntryCountThreadCacheSize(newSize);
}
}
// Number of sub maps allocated so far to implement this map. The more there
// are, the worse the performance.
int numSubMaps() const {
return numMapsAllocated_.load(std::memory_order_acquire);
}
iterator begin() {
iterator it(this, 0,
subMaps_[0].load(std::memory_order_relaxed)->begin());
it.checkAdvanceToNextSubmap();
return it;
}
const_iterator begin() const {
const_iterator it(this, 0,
subMaps_[0].load(std::memory_order_relaxed)->begin());
it.checkAdvanceToNextSubmap();
return it;
}
iterator end() {
return iterator();
}
const_iterator end() const {
return const_iterator();
}
/* Advanced functions for direct access: */
inline uint32_t recToIdx(const value_type& r, bool mayInsert = true) {
SimpleRetT ret = mayInsert ?
insertInternal(r.first, r.second) : findInternal(r.first);
return encodeIndex(ret.i, ret.j);
}
inline uint32_t recToIdx(value_type&& r, bool mayInsert = true) {
SimpleRetT ret = mayInsert ?
insertInternal(r.first, std::move(r.second)) : findInternal(r.first);
return encodeIndex(ret.i, ret.j);
}
inline uint32_t recToIdx(key_type k, const mapped_type& v,
bool mayInsert = true) {
SimpleRetT ret = mayInsert ? insertInternal(k, v) : findInternal(k);
return encodeIndex(ret.i, ret.j);
}
inline uint32_t recToIdx(key_type k, mapped_type&& v, bool mayInsert = true) {
SimpleRetT ret = mayInsert ?
insertInternal(k, std::move(v)) : findInternal(k);
return encodeIndex(ret.i, ret.j);
}
inline uint32_t keyToIdx(const KeyT k, bool mayInsert = false) {
return recToIdx(value_type(k), mayInsert);
}
inline const value_type& idxToRec(uint32_t idx) const {
SimpleRetT ret = findAtInternal(idx);
return subMaps_[ret.i].load(std::memory_order_relaxed)->idxToRec(ret.j);
}
/* Private data and helper functions... */
private:
// This limits primary submap size to 2^31 ~= 2 billion, secondary submap
// size to 2^(32 - kNumSubMapBits_ - 1) = 2^27 ~= 130 million, and num subMaps
// to 2^kNumSubMapBits_ = 16.
static const uint32_t kNumSubMapBits_ = 4;
static const uint32_t kSecondaryMapBit_ = 1u << 31; // Highest bit
static const uint32_t kSubMapIndexShift_ = 32 - kNumSubMapBits_ - 1;
static const uint32_t kSubMapIndexMask_ = (1 << kSubMapIndexShift_) - 1;
static const uint32_t kNumSubMaps_ = 1 << kNumSubMapBits_;
static const uintptr_t kLockedPtr_ = 0x88ULL << 48; // invalid pointer
struct SimpleRetT { uint32_t i; size_t j; bool success;
SimpleRetT(uint32_t ii, size_t jj, bool s) : i(ii), j(jj), success(s) {}
SimpleRetT() = default;
};
template <typename LookupKeyT = key_type,
typename LookupHashFcn = hasher,
typename LookupEqualFcn = key_equal,
typename LookupKeyToKeyFcn = key_convert,
typename... ArgTs>
SimpleRetT insertInternal(LookupKeyT key, ArgTs&&... value);
template <typename LookupKeyT = key_type,
typename LookupHashFcn = hasher,
typename LookupEqualFcn = key_equal>
SimpleRetT findInternal(const LookupKeyT k) const;
SimpleRetT findAtInternal(uint32_t idx) const;
std::atomic<SubMap*> subMaps_[kNumSubMaps_];
std::atomic<uint32_t> numMapsAllocated_;
inline bool tryLockMap(int idx) {
SubMap* val = nullptr;
return subMaps_[idx].compare_exchange_strong(val, (SubMap*)kLockedPtr_,
std::memory_order_acquire);
}
static inline uint32_t encodeIndex(uint32_t subMap, uint32_t subMapIdx);
}; // AtomicHashMap
template <class KeyT,
class ValueT,
class HashFcn = std::hash<KeyT>,
class EqualFcn = std::equal_to<KeyT>,
class Allocator = std::allocator<char>>
using QuadraticProbingAtomicHashMap =
AtomicHashMap<KeyT,
ValueT,
HashFcn,
EqualFcn,
Allocator,
AtomicHashArrayQuadraticProbeFcn>;
} // namespace folly
#include <folly/AtomicHashMap-inl.h>
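A minimal usage sketch for the interface above; the key/value types, sizes, and printed fields are illustrative, and the template defaults and sentinel-key Config come from AtomicHashArray.h, which is not shown here.

#include <folly/AtomicHashMap.h>
#include <cstdint>
#include <cstdio>

int main() {
  // finalSizeEst is the expected number of elements; growth past it works
  // but degrades performance, as the header comment explains.
  folly::AtomicHashMap<int64_t, int64_t> ahm(4096);

  // insert() does not overwrite on collision; ret.second tells us whether
  // this call actually created the entry.
  auto ret = ahm.insert(1, 100);
  std::printf("inserted: %d\n", ret.second ? 1 : 0);

  auto it = ahm.find(1);
  if (it != ahm.end()) {
    std::printf("value: %lld\n", static_cast<long long>(it->second));
  }

  // getIndex()/findAt() give a stable 32-bit handle into internal storage.
  uint32_t idx = ret.first.getIndex();
  std::printf("via findAt: %lld\n",
              static_cast<long long>(ahm.findAt(idx)->second));
  return 0;
}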

137
ios/Pods/Folly/folly/AtomicIntrusiveLinkedList.h generated Normal file

@ -0,0 +1,137 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <atomic>
#include <cassert>
namespace folly {
/**
* A very simple atomic single-linked list primitive.
*
* Usage:
*
* class MyClass {
* AtomicIntrusiveLinkedListHook<MyClass> hook_;
* };
*
* AtomicIntrusiveLinkedList<MyClass, &MyClass::hook_> list;
* list.insertHead(&a);
* list.sweep([] (MyClass* c) { doSomething(c); });
*/
template <class T>
struct AtomicIntrusiveLinkedListHook {
T* next{nullptr};
};
template <class T, AtomicIntrusiveLinkedListHook<T> T::*HookMember>
class AtomicIntrusiveLinkedList {
public:
AtomicIntrusiveLinkedList() {}
AtomicIntrusiveLinkedList(const AtomicIntrusiveLinkedList&) = delete;
AtomicIntrusiveLinkedList& operator=(const AtomicIntrusiveLinkedList&) =
delete;
AtomicIntrusiveLinkedList(AtomicIntrusiveLinkedList&& other) noexcept {
auto tmp = other.head_.load();
other.head_ = head_.load();
head_ = tmp;
}
AtomicIntrusiveLinkedList& operator=(
AtomicIntrusiveLinkedList&& other) noexcept {
auto tmp = other.head_.load();
other.head_ = head_.load();
head_ = tmp;
return *this;
}
/**
* Note: list must be empty on destruction.
*/
~AtomicIntrusiveLinkedList() {
assert(empty());
}
bool empty() const {
return head_.load() == nullptr;
}
/**
* Atomically insert t at the head of the list.
* @return True if the inserted element is the only one in the list
* after the call.
*/
bool insertHead(T* t) {
assert(next(t) == nullptr);
auto oldHead = head_.load(std::memory_order_relaxed);
do {
next(t) = oldHead;
/* oldHead is updated by the call below.
NOTE: we don't use next(t) instead of oldHead directly due to
compiler bugs (GCC prior to 4.8.3 (bug 60272), clang (bug 18899),
MSVC (bug 819819); source:
http://en.cppreference.com/w/cpp/atomic/atomic/compare_exchange */
} while (!head_.compare_exchange_weak(oldHead, t,
std::memory_order_release,
std::memory_order_relaxed));
return oldHead == nullptr;
}
/**
* Repeatedly replaces the head with nullptr,
* and calls func() on the removed elements in the order from tail to head.
* Stops when the list is empty.
*/
template <typename F>
void sweep(F&& func) {
while (auto head = head_.exchange(nullptr)) {
auto rhead = reverse(head);
while (rhead != nullptr) {
auto t = rhead;
rhead = next(t);
next(t) = nullptr;
func(t);
}
}
}
private:
std::atomic<T*> head_{nullptr};
static T*& next(T* t) {
return (t->*HookMember).next;
}
/* Reverses a linked list, returning the pointer to the new head
(old tail) */
static T* reverse(T* head) {
T* rhead = nullptr;
while (head != nullptr) {
auto t = head;
head = next(t);
next(t) = rhead;
rhead = t;
}
return rhead;
}
};
} // namespace folly
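A self-contained usage sketch for the class above; the Task struct and ids are made up for illustration.

#include <folly/AtomicIntrusiveLinkedList.h>
#include <cstdio>

struct Task {
  int id;
  folly::AtomicIntrusiveLinkedListHook<Task> hook;
};

int main() {
  folly::AtomicIntrusiveLinkedList<Task, &Task::hook> list;
  Task a{1, {}};
  Task b{2, {}};
  list.insertHead(&a);  // returns true: the list was empty before this insert
  list.insertHead(&b);
  // sweep() drains the list and visits elements oldest-first (a, then b),
  // matching the tail-to-head order described in its comment.
  list.sweep([](Task* t) { std::printf("task %d\n", t->id); });
  return 0;  // the list must be empty on destruction, which sweep() ensures
}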

86
ios/Pods/Folly/folly/AtomicLinkedList.h generated Normal file

@ -0,0 +1,86 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/AtomicIntrusiveLinkedList.h>
#include <folly/Memory.h>
namespace folly {
/**
* A very simple atomic single-linked list primitive.
*
* Usage:
*
* AtomicLinkedList<MyClass> list;
* list.insertHead(a);
* list.sweep([] (MyClass&& c) { doSomething(c); });
*/
template <class T>
class AtomicLinkedList {
public:
AtomicLinkedList() {}
AtomicLinkedList(const AtomicLinkedList&) = delete;
AtomicLinkedList& operator=(const AtomicLinkedList&) = delete;
AtomicLinkedList(AtomicLinkedList&& other) noexcept = default;
AtomicLinkedList& operator=(AtomicLinkedList&& other) = default;
~AtomicLinkedList() {
sweep([](T&&) {});
}
bool empty() const {
return list_.empty();
}
/**
* Atomically insert t at the head of the list.
* @return True if the inserted element is the only one in the list
* after the call.
*/
bool insertHead(T t) {
auto wrapper = folly::make_unique<Wrapper>(std::move(t));
return list_.insertHead(wrapper.release());
}
/**
* Repeatedly pops element from head,
* and calls func() on the removed elements in the order from tail to head.
* Stops when the list is empty.
*/
template <typename F>
void sweep(F&& func) {
list_.sweep([&](Wrapper* wrapperPtr) mutable {
std::unique_ptr<Wrapper> wrapper(wrapperPtr);
func(std::move(wrapper->data));
});
}
private:
struct Wrapper {
explicit Wrapper(T&& t) : data(std::move(t)) {}
AtomicIntrusiveLinkedListHook<Wrapper> hook;
T data;
};
AtomicIntrusiveLinkedList<Wrapper, &Wrapper::hook> list_;
};
} // namespace folly
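And the equivalent sketch for the owning variant above; the element type and strings are illustrative.

#include <folly/AtomicLinkedList.h>
#include <cstdio>
#include <string>

int main() {
  folly::AtomicLinkedList<std::string> list;
  list.insertHead(std::string("first"));   // true: the list was empty
  list.insertHead(std::string("second"));
  // Elements are moved into the callback oldest-first and their internal
  // wrappers are freed as part of the sweep.
  list.sweep([](std::string&& s) { std::printf("%s\n", s.c_str()); });
  return 0;
}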

139
ios/Pods/Folly/folly/AtomicStruct.h generated Normal file

@ -0,0 +1,139 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <atomic>
#include <type_traits>
#include <folly/Traits.h>
#include <string.h>
#include <stdint.h>
namespace folly {
namespace detail {
template <int N> struct AtomicStructIntPick {};
}
/// AtomicStruct<T> works like C++ atomics, but can be used on any POD
/// type <= 8 bytes.
template <
typename T,
template<typename> class Atom = std::atomic,
typename Raw = typename detail::AtomicStructIntPick<sizeof(T)>::type>
class AtomicStruct {
static_assert(alignof(T) <= alignof(Raw),
"target type can't have stricter alignment than matching int");
static_assert(sizeof(T) <= sizeof(Raw),
"underlying type isn't big enough");
static_assert(std::is_trivial<T>::value ||
folly::IsTriviallyCopyable<T>::value,
"target type must be trivially copyable");
union {
Atom<Raw> data;
T typedData;
};
static Raw encode(T v) noexcept {
// we expect the compiler to optimize away the memcpy, but without
// it we would violate strict aliasing rules
Raw d = 0;
memcpy(&d, &v, sizeof(T));
return d;
}
static T decode(Raw d) noexcept {
T v;
memcpy(&v, &d, sizeof(T));
return v;
}
public:
AtomicStruct() = default;
~AtomicStruct() = default;
AtomicStruct(AtomicStruct<T> const &) = delete;
AtomicStruct<T>& operator= (AtomicStruct<T> const &) = delete;
constexpr /* implicit */ AtomicStruct(T v) noexcept : typedData(v) {}
bool is_lock_free() const noexcept {
return data.is_lock_free();
}
bool compare_exchange_strong(
T& v0, T v1,
std::memory_order mo = std::memory_order_seq_cst) noexcept {
Raw d0 = encode(v0);
bool rv = data.compare_exchange_strong(d0, encode(v1), mo);
if (!rv) {
v0 = decode(d0);
}
return rv;
}
bool compare_exchange_weak(
T& v0, T v1,
std::memory_order mo = std::memory_order_seq_cst) noexcept {
Raw d0 = encode(v0);
bool rv = data.compare_exchange_weak(d0, encode(v1), mo);
if (!rv) {
v0 = decode(d0);
}
return rv;
}
T exchange(T v, std::memory_order mo = std::memory_order_seq_cst) noexcept {
return decode(data.exchange(encode(v), mo));
}
/* implicit */ operator T () const noexcept {
return decode(data);
}
T load(std::memory_order mo = std::memory_order_seq_cst) const noexcept {
return decode(data.load(mo));
}
T operator= (T v) noexcept {
return decode(data = encode(v));
}
void store(T v, std::memory_order mo = std::memory_order_seq_cst) noexcept {
data.store(encode(v), mo);
}
// std::atomic also provides volatile versions of all of the access
// methods. These are callable on volatile objects, and also can
// theoretically have different implementations than their non-volatile
// counterpart. If someone wants them here they can easily be added
// by duplicating the above code and the corresponding unit tests.
};
namespace detail {
template <> struct AtomicStructIntPick<1> { typedef uint8_t type; };
template <> struct AtomicStructIntPick<2> { typedef uint16_t type; };
template <> struct AtomicStructIntPick<3> { typedef uint32_t type; };
template <> struct AtomicStructIntPick<4> { typedef uint32_t type; };
template <> struct AtomicStructIntPick<5> { typedef uint64_t type; };
template <> struct AtomicStructIntPick<6> { typedef uint64_t type; };
template <> struct AtomicStructIntPick<7> { typedef uint64_t type; };
template <> struct AtomicStructIntPick<8> { typedef uint64_t type; };
} // namespace detail
} // namespace folly
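A small sketch of using the wrapper above on an 8-byte POD; the Range struct and values are made up for illustration.

#include <folly/AtomicStruct.h>
#include <cstdint>
#include <cstdio>

struct Range {
  uint32_t lo;
  uint32_t hi;
};  // 8 bytes, trivially copyable, so it maps onto a single uint64_t

int main() {
  folly::AtomicStruct<Range> r(Range{0, 10});
  Range expected = r.load();
  // The whole struct is compared and swapped as one 64-bit value.
  Range widened{expected.lo, expected.hi + 5};
  if (r.compare_exchange_strong(expected, widened)) {
    Range now = r;  // implicit conversion performs an atomic load
    std::printf("range is [%u, %u)\n", now.lo, now.hi);
  }
  return 0;
}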

523
ios/Pods/Folly/folly/AtomicUnorderedMap.h generated Normal file

@ -0,0 +1,523 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <atomic>
#include <functional>
#include <stdexcept>
#include <system_error>
#include <type_traits>
#include <stdint.h>
#include <folly/Bits.h>
#include <folly/Conv.h>
#include <folly/Likely.h>
#include <folly/Random.h>
#include <folly/detail/AtomicUnorderedMapUtils.h>
#include <folly/portability/SysMman.h>
#include <folly/portability/Unistd.h>
#include <boost/type_traits/has_trivial_destructor.hpp>
#include <limits>
namespace folly {
/// You're probably reading this because you are looking for an
/// AtomicUnorderedMap<K,V> that is fully general, highly concurrent (for
/// reads, writes, and iteration), and makes no performance compromises.
/// We haven't figured that one out yet. What you will find here is a
/// hash table implementation that sacrifices generality so that it can
/// give you all of the other things.
///
/// LIMITATIONS:
///
/// * Insert only (*) - the only write operation supported directly by
/// AtomicUnorderedInsertMap is findOrConstruct. There is a (*) because
/// values aren't moved, so you can roll your own concurrency control for
/// in-place updates of values (see MutableData and MutableAtom below),
/// but the hash table itself doesn't help you.
///
/// * No resizing - you must specify the capacity up front, and once
/// the hash map gets full you won't be able to insert. Insert
/// performance will degrade once the load factor is high. Insert is
/// O(1/(1-actual_load_factor)). Note that this is a pretty strong
/// limitation, because you can't remove existing keys.
///
/// * 2^30 maximum default capacity - by default AtomicUnorderedInsertMap
/// uses uint32_t internal indexes (and steals 2 bits), limiting you
/// to about a billion entries. If you need more you can fill in all
/// of the template params so you change IndexType to uint64_t, or you
/// can use AtomicUnorderedInsertMap64. 64-bit indexes will increase
/// the space overhead of the map, of course.
///
/// WHAT YOU GET IN EXCHANGE:
///
/// * Arbitrary key and value types - any K and V that can be used in a
/// std::unordered_map can be used here. In fact, the key and value
/// types don't even have to be copyable or moveable!
///
/// * Keys and values in the map won't be moved - it is safe to keep
/// pointers or references to the keys and values in the map, because
/// they are never moved or destroyed (until the map itself is destroyed).
///
/// * Iterators are never invalidated - writes don't invalidate iterators,
/// so you can scan and insert in parallel.
///
/// * Fast wait-free reads - reads are usually only a single cache miss,
/// even when the hash table is very large. Wait-freedom means that
/// you won't see latency outliers even in the face of concurrent writes.
///
/// * Lock-free insert - writes proceed in parallel. If a thread in the
/// middle of a write is unlucky and gets suspended, it doesn't block
/// anybody else.
///
/// COMMENTS ON INSERT-ONLY
///
/// This map provides wait-free linearizable reads and lock-free
/// linearizable inserts. Inserted values won't be moved, but no
/// concurrency control is provided for safely updating them. To remind
/// you of that fact they are only provided in const form. This is the
/// only simple safe thing to do while preserving something like the normal
/// std::map iteration form, which requires that iteration be exposed
/// via std::pair (and prevents encapsulation of access to the value).
///
/// There are a couple of reasonable policies for doing in-place
/// concurrency control on the values. I am hoping that the policy can
/// be injected via the value type or an extra template param, to keep
/// the core AtomicUnorderedInsertMap insert-only:
///
/// CONST: this is the currently implemented strategy, which is simple,
/// performant, and not that expressive. You can always put in a value
/// with a mutable field (see MutableAtom below), but that doesn't look
/// as pretty as it should.
///
/// ATOMIC: for integers and integer-size trivially copyable structs
/// (via an adapter like tao/queues/AtomicStruct) the value can be a
/// std::atomic and read and written atomically.
///
/// SEQ-LOCK: attach a counter incremented before and after write.
/// Writers serialize by using CAS to make an even->odd transition,
/// then odd->even after the write. Readers grab the value with memcpy,
/// checking sequence value before and after. Readers retry until they
/// see an even sequence number that doesn't change. This works for
/// larger structs, but still requires memcpy to be equivalent to copy
/// assignment, and it is no longer lock-free. It scales very well,
/// because the readers are still invisible (no cache line writes).
///
/// LOCK: folly's SharedMutex would be a good choice here.
///
/// MEMORY ALLOCATION
///
/// Underlying memory is allocated as a big anonymous mmap chunk, which
/// might be cheaper than calloc() and is certainly not more expensive
/// for large maps. If the SkipKeyValueDeletion template param is true
/// then deletion of the map consists of unmapping the backing memory,
/// which is much faster than destructing all of the keys and values.
/// Feel free to override if std::is_trivial_destructor isn't recognizing
/// the triviality of your destructors.
template <typename Key,
typename Value,
typename Hash = std::hash<Key>,
typename KeyEqual = std::equal_to<Key>,
bool SkipKeyValueDeletion =
(boost::has_trivial_destructor<Key>::value &&
boost::has_trivial_destructor<Value>::value),
template<typename> class Atom = std::atomic,
typename IndexType = uint32_t,
typename Allocator = folly::detail::MMapAlloc>
struct AtomicUnorderedInsertMap {
typedef Key key_type;
typedef Value mapped_type;
typedef std::pair<Key,Value> value_type;
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
typedef Hash hasher;
typedef KeyEqual key_equal;
typedef const value_type& const_reference;
typedef struct ConstIterator {
ConstIterator(const AtomicUnorderedInsertMap& owner, IndexType slot)
: owner_(owner)
, slot_(slot)
{}
ConstIterator(const ConstIterator&) = default;
ConstIterator& operator= (const ConstIterator&) = default;
const value_type& operator* () const {
return owner_.slots_[slot_].keyValue();
}
const value_type* operator-> () const {
return &owner_.slots_[slot_].keyValue();
}
// pre-increment
const ConstIterator& operator++ () {
while (slot_ > 0) {
--slot_;
if (owner_.slots_[slot_].state() == LINKED) {
break;
}
}
return *this;
}
// post-increment
ConstIterator operator++(int /* dummy */) {
auto prev = *this;
++*this;
return prev;
}
bool operator== (const ConstIterator& rhs) const {
return slot_ == rhs.slot_;
}
bool operator!= (const ConstIterator& rhs) const {
return !(*this == rhs);
}
private:
const AtomicUnorderedInsertMap& owner_;
IndexType slot_;
} const_iterator;
friend ConstIterator;
/// Constructs a map that will support the insertion of maxSize key-value
/// pairs without exceeding the max load factor. Load factors of greater
/// than 1 are not supported, and once the actual load factor of the
/// map approaches 1 the insert performance will suffer. The capacity
/// is limited to 2^30 (about a billion) for the default IndexType,
/// beyond which we will throw invalid_argument.
explicit AtomicUnorderedInsertMap(
size_t maxSize,
float maxLoadFactor = 0.8f,
const Allocator& alloc = Allocator())
: allocator_(alloc)
{
size_t capacity = maxSize / std::min(1.0f, maxLoadFactor) + 128;
size_t avail = size_t{1} << (8 * sizeof(IndexType) - 2);
if (capacity > avail && maxSize < avail) {
// we'll do our best
capacity = avail;
}
if (capacity < maxSize || capacity > avail) {
throw std::invalid_argument(
"AtomicUnorderedInsertMap capacity must fit in IndexType with 2 bits "
"left over");
}
numSlots_ = capacity;
slotMask_ = folly::nextPowTwo(capacity * 4) - 1;
mmapRequested_ = sizeof(Slot) * capacity;
slots_ = reinterpret_cast<Slot*>(allocator_.allocate(mmapRequested_));
zeroFillSlots();
// mark the zero-th slot as in-use but not valid, since that happens
// to be our nil value
slots_[0].stateUpdate(EMPTY, CONSTRUCTING);
}
~AtomicUnorderedInsertMap() {
if (!SkipKeyValueDeletion) {
for (size_t i = 1; i < numSlots_; ++i) {
slots_[i].~Slot();
}
}
allocator_.deallocate(reinterpret_cast<char*>(slots_), mmapRequested_);
}
/// Searches for the key, returning (iter,false) if it is found.
/// If it is not found calls the functor Func with a void* argument
/// that is raw storage suitable for placement construction of a Value
/// (see raw_value_type), then returns (iter,true). May call Func and
/// then return (iter,false) if there are other concurrent writes, in
/// which case the newly constructed value will be immediately destroyed.
///
/// This function does not block other readers or writers. If there
/// are other concurrent writes, many parallel calls to func may happen
/// and only the first one to complete will win. The values constructed
/// by the other calls to func will be destroyed.
///
/// Usage:
///
/// AtomicUnorderedInsertMap<std::string,std::string> memo;
///
/// auto value = memo.findOrConstruct(key, [=](void* raw) {
/// new (raw) std::string(computation(key));
/// })->first;
template<typename Func>
std::pair<const_iterator,bool> findOrConstruct(const Key& key, Func&& func) {
auto const slot = keyToSlotIdx(key);
auto prev = slots_[slot].headAndState_.load(std::memory_order_acquire);
auto existing = find(key, slot);
if (existing != 0) {
return std::make_pair(ConstIterator(*this, existing), false);
}
auto idx = allocateNear(slot);
new (&slots_[idx].keyValue().first) Key(key);
func(static_cast<void*>(&slots_[idx].keyValue().second));
while (true) {
slots_[idx].next_ = prev >> 2;
// we can merge the head update and the CONSTRUCTING -> LINKED update
// into a single CAS if slot == idx (which should happen often)
auto after = idx << 2;
if (slot == idx) {
after += LINKED;
} else {
after += (prev & 3);
}
if (slots_[slot].headAndState_.compare_exchange_strong(prev, after)) {
// success
if (idx != slot) {
slots_[idx].stateUpdate(CONSTRUCTING, LINKED);
}
return std::make_pair(ConstIterator(*this, idx), true);
}
// compare_exchange_strong updates its first arg on failure, so
// there is no need to reread prev
existing = find(key, slot);
if (existing != 0) {
// our allocated key and value are no longer needed
slots_[idx].keyValue().first.~Key();
slots_[idx].keyValue().second.~Value();
slots_[idx].stateUpdate(CONSTRUCTING, EMPTY);
return std::make_pair(ConstIterator(*this, existing), false);
}
}
}
/// This isn't really emplace, but it is what we need to test.
/// Eventually we can duplicate all of the std::pair constructor
/// forms, including a recursive tuple forwarding template
/// http://functionalcpp.wordpress.com/2013/08/28/tuple-forwarding/).
template<class K, class V>
std::pair<const_iterator,bool> emplace(const K& key, V&& value) {
return findOrConstruct(key, [&](void* raw) {
new (raw) Value(std::forward<V>(value));
});
}
const_iterator find(const Key& key) const {
return ConstIterator(*this, find(key, keyToSlotIdx(key)));
}
const_iterator cbegin() const {
IndexType slot = numSlots_ - 1;
while (slot > 0 && slots_[slot].state() != LINKED) {
--slot;
}
return ConstIterator(*this, slot);
}
const_iterator cend() const {
return ConstIterator(*this, 0);
}
private:
enum {
kMaxAllocationTries = 1000, // after this we throw
};
enum BucketState : IndexType {
EMPTY = 0,
CONSTRUCTING = 1,
LINKED = 2,
};
/// Lock-free insertion is easiest by prepending to collision chains.
/// A large chaining hash table takes two cache misses instead of
/// one, however. Our solution is to colocate the bucket storage and
/// the head storage, so that even though we are traversing chains we
/// are likely to stay within the same cache line. Just make sure to
/// traverse head before looking at any keys. This strategy gives us
/// 32 bit pointers and fast iteration.
struct Slot {
/// The bottom two bits are the BucketState, the rest is the index
/// of the first bucket for the chain whose keys map to this slot.
/// When things are going well the head usually links to this slot,
/// but that doesn't always have to happen.
Atom<IndexType> headAndState_;
/// The next bucket in the chain
IndexType next_;
/// Key and Value
typename std::aligned_storage<sizeof(value_type),
alignof(value_type)>::type raw_;
~Slot() {
auto s = state();
assert(s == EMPTY || s == LINKED);
if (s == LINKED) {
keyValue().first.~Key();
keyValue().second.~Value();
}
}
BucketState state() const {
return BucketState(headAndState_.load(std::memory_order_acquire) & 3);
}
void stateUpdate(BucketState before, BucketState after) {
assert(state() == before);
headAndState_ += (after - before);
}
value_type& keyValue() {
assert(state() != EMPTY);
return *static_cast<value_type*>(static_cast<void*>(&raw_));
}
const value_type& keyValue() const {
assert(state() != EMPTY);
return *static_cast<const value_type*>(static_cast<const void*>(&raw_));
}
};
// We manually manage the slot memory so we can bypass initialization
// (by getting a zero-filled mmap chunk) and optionally destruction of
// the slots
size_t mmapRequested_;
size_t numSlots_;
/// tricky, see keyToSlotIdx
size_t slotMask_;
Allocator allocator_;
Slot* slots_;
IndexType keyToSlotIdx(const Key& key) const {
size_t h = hasher()(key);
h &= slotMask_;
while (h >= numSlots_) {
h -= numSlots_;
}
return h;
}
IndexType find(const Key& key, IndexType slot) const {
KeyEqual ke = {};
auto hs = slots_[slot].headAndState_.load(std::memory_order_acquire);
for (slot = hs >> 2; slot != 0; slot = slots_[slot].next_) {
if (ke(key, slots_[slot].keyValue().first)) {
return slot;
}
}
return 0;
}
/// Allocates a slot and returns its index. Tries to put it near
/// slots_[start].
IndexType allocateNear(IndexType start) {
for (auto tries = 0; tries < kMaxAllocationTries; ++tries) {
auto slot = allocationAttempt(start, tries);
auto prev = slots_[slot].headAndState_.load(std::memory_order_acquire);
if ((prev & 3) == EMPTY &&
slots_[slot].headAndState_.compare_exchange_strong(
prev, prev + CONSTRUCTING - EMPTY)) {
return slot;
}
}
throw std::bad_alloc();
}
/// Returns the slot we should attempt to allocate after tries failed
/// tries, starting from the specified slot. This is pulled out so we
/// can specialize it differently during deterministic testing
IndexType allocationAttempt(IndexType start, IndexType tries) const {
if (LIKELY(tries < 8 && start + tries < numSlots_)) {
return start + tries;
} else {
IndexType rv;
if (sizeof(IndexType) <= 4) {
rv = folly::Random::rand32(numSlots_);
} else {
rv = folly::Random::rand64(numSlots_);
}
assert(rv < numSlots_);
return rv;
}
}
void zeroFillSlots() {
using folly::detail::GivesZeroFilledMemory;
if (!GivesZeroFilledMemory<Allocator>::value) {
memset(slots_, 0, mmapRequested_);
}
}
};
/// AtomicUnorderedInsertMap64 is just a type alias that makes it easier
/// to select a 64 bit slot index type. Use this if you need a capacity
/// bigger than 2^30 (about a billion). This increases memory overheads,
/// obviously.
template <typename Key,
typename Value,
typename Hash = std::hash<Key>,
typename KeyEqual = std::equal_to<Key>,
bool SkipKeyValueDeletion =
(boost::has_trivial_destructor<Key>::value &&
boost::has_trivial_destructor<Value>::value),
template <typename> class Atom = std::atomic,
typename Allocator = folly::detail::MMapAlloc>
using AtomicUnorderedInsertMap64 =
AtomicUnorderedInsertMap<Key,
Value,
Hash,
KeyEqual,
SkipKeyValueDeletion,
Atom,
uint64_t,
Allocator>;
/// MutableAtom is a tiny wrapper that gives you the option of atomically
/// updating values inserted into an AtomicUnorderedInsertMap<K,
/// MutableAtom<V>>. This relies on AtomicUnorderedInsertMap's guarantee
/// that it doesn't move values.
template <typename T,
template<typename> class Atom = std::atomic>
struct MutableAtom {
mutable Atom<T> data;
explicit MutableAtom(const T& init) : data(init) {}
};
/// MutableData is a tiny wrapper that gives you the option of using an
/// external concurrency control mechanism for updating values inserted
/// into an AtomicUnorderedInsertMap.
template <typename T>
struct MutableData {
mutable T data;
explicit MutableData(const T& init) : data(init) {}
};
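/// A minimal usage sketch (assumes the emplace()/find() interface declared
/// earlier in this header; the key/value choice is illustrative only):
///
///   folly::AtomicUnorderedInsertMap<int, folly::MutableAtom<int>> counts(1024);
///   counts.emplace(42, 0);                       // insert-once semantics
///   counts.find(42)->second.data.fetch_add(1);   // atomic in-place update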
}

298
ios/Pods/Folly/folly/Baton.h generated Normal file
View File

@ -0,0 +1,298 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <stdint.h>
#include <atomic>
#include <errno.h>
#include <assert.h>
#include <folly/detail/Futex.h>
#include <folly/detail/MemoryIdler.h>
#include <folly/portability/Asm.h>
namespace folly {
/// A Baton allows a thread to block once and be awoken: it captures
/// a single handoff. During its lifecycle (from construction/reset to
/// destruction/reset) a baton must either be post()ed and wait()ed exactly
/// once each, or not at all.
///
/// Baton includes no internal padding, and is only 4 bytes in size.
/// Any alignment or padding to avoid false sharing is up to the user.
///
/// This is basically a stripped-down semaphore that supports only a
/// single call to sem_post and a single call to sem_wait. The current
/// posix semaphore sem_t isn't too bad, but this provides a bit more
/// speed, inlining, smaller size, a guarantee that the implementation
/// won't change, and compatibility with DeterministicSchedule. By having
/// a much more restrictive lifecycle we can also add a bunch of assertions
/// that can help to catch race conditions ahead of time.
template <template<typename> class Atom = std::atomic>
struct Baton {
constexpr Baton() : state_(INIT) {}
Baton(Baton const&) = delete;
Baton& operator=(Baton const&) = delete;
/// It is an error to destroy a Baton on which a thread is currently
/// wait()ing. In practice this means that the waiter usually takes
/// responsibility for destroying the Baton.
~Baton() {
// The docblock for this function says that it can't be called when
// there is a concurrent waiter. We assume a strong version of this
// requirement in which the caller must _know_ that this is true, they
// are not allowed to be merely lucky. If two threads are involved,
// the destroying thread must actually have synchronized with the
// waiting thread after wait() returned. To convey causality the
// waiting thread must have used release semantics and the destroying
// thread must have used acquire semantics for that communication,
// so we are guaranteed to see the post-wait() value of state_,
// which cannot be WAITING.
//
// Note that since we only care about a single memory location,
// the only two plausible memory orders here are relaxed and seq_cst.
assert(state_.load(std::memory_order_relaxed) != WAITING);
}
/// Equivalent to destroying the Baton and creating a new one. It is
/// a bug to call this while there is a waiting thread, so in practice
/// the waiter will be the one that resets the baton.
void reset() {
// See ~Baton for a discussion about why relaxed is okay here
assert(state_.load(std::memory_order_relaxed) != WAITING);
// We use a similar argument to justify the use of a relaxed store
// here. Since both wait() and post() are required to be called
// only once per lifetime, no thread can actually call those methods
// correctly after a reset() unless it synchronizes with the thread
// that performed the reset(). If a post() or wait() on another thread
// didn't synchronize, then regardless of what operation we performed
// here there would be a race on proper use of the Baton's spec
// (although not on any particular load and store). Put another way,
// we don't need to synchronize here because anybody that might rely
// on such synchronization is required by the baton rules to perform
// an additional synchronization that has the desired effect anyway.
//
// There is actually a similar argument to be made about the
// constructor, in which the fenceless constructor initialization
// of state_ is piggybacked on whatever synchronization mechanism
// distributes knowledge of the Baton's existence
state_.store(INIT, std::memory_order_relaxed);
}
/// Causes wait() to wake up. For each lifetime of a Baton (where a
/// lifetime starts at construction or reset() and ends at destruction
/// or reset()) there can be at most one call to post(). Any thread
/// may call post().
///
/// Although we could implement a more generic semaphore semantics
/// without any extra size or CPU overhead, the single-call limitation
/// allows us to have better assertions during debug builds.
void post() {
uint32_t before = state_.load(std::memory_order_acquire);
assert(before == INIT || before == WAITING || before == TIMED_OUT);
if (before == INIT &&
state_.compare_exchange_strong(before, EARLY_DELIVERY)) {
return;
}
assert(before == WAITING || before == TIMED_OUT);
if (before == TIMED_OUT) {
return;
}
assert(before == WAITING);
state_.store(LATE_DELIVERY, std::memory_order_release);
state_.futexWake(1);
}
/// Waits until post() has been called in the current Baton lifetime.
/// May be called at most once during a Baton lifetime (construction
/// |reset until destruction|reset). If post is called before wait in
/// the current lifetime then this method returns immediately.
///
/// The restriction that there can be at most one wait() per lifetime
/// could be relaxed somewhat without any perf or size regressions,
/// but by making this condition very restrictive we can provide better
/// checking in debug builds.
void wait() {
if (spinWaitForEarlyDelivery()) {
assert(state_.load(std::memory_order_acquire) == EARLY_DELIVERY);
return;
}
// guess we have to block :(
uint32_t expected = INIT;
if (!state_.compare_exchange_strong(expected, WAITING)) {
// CAS failed, last minute reprieve
assert(expected == EARLY_DELIVERY);
return;
}
while (true) {
detail::MemoryIdler::futexWait(state_, WAITING);
// state_ is the truth even if FUTEX_WAIT reported a matching
// FUTEX_WAKE, since we aren't using type-stable storage and we
// don't guarantee reuse. The scenario goes like this: thread
// A's last touch of a Baton is a call to post(), which stores
// LATE_DELIVERY and gets an unlucky context switch before delivering
// the corresponding futexWake. Thread B sees LATE_DELIVERY
// without consuming a futex event, because it calls futexWait
// with an expected value of WAITING and hence doesn't go to sleep.
// B returns, so the Baton's memory is reused and becomes another
// Baton (or a reuse of this one). B calls futexWait on the new
// Baton lifetime, then A wakes up and delivers a spurious futexWake
// to the same memory location. B's futexWait will then report a
// consumed wake event even though state_ is still WAITING.
//
// It would be possible to add an extra state_ dance to communicate
// that the futexWake has been sent so that we can be sure to consume
// it before returning, but that would be a perf and complexity hit.
uint32_t s = state_.load(std::memory_order_acquire);
assert(s == WAITING || s == LATE_DELIVERY);
if (s == LATE_DELIVERY) {
return;
}
// retry
}
}
/// Similar to wait, but with a timeout. The thread is unblocked if the
/// timeout expires.
/// Note: Only a single call to timed_wait/wait is allowed during a baton's
/// life-cycle (from construction/reset to destruction/reset). In other
/// words, after timed_wait the caller can't invoke wait/timed_wait/try_wait
/// again on the same baton without resetting it.
///
/// @param deadline Time until which the thread can block
/// @return true if the baton was posted to before timeout,
/// false otherwise
template <typename Clock, typename Duration = typename Clock::duration>
bool timed_wait(const std::chrono::time_point<Clock,Duration>& deadline) {
if (spinWaitForEarlyDelivery()) {
assert(state_.load(std::memory_order_acquire) == EARLY_DELIVERY);
return true;
}
// guess we have to block :(
uint32_t expected = INIT;
if (!state_.compare_exchange_strong(expected, WAITING)) {
// CAS failed, last minute reprieve
assert(expected == EARLY_DELIVERY);
return true;
}
while (true) {
auto rv = state_.futexWaitUntil(WAITING, deadline);
if (rv == folly::detail::FutexResult::TIMEDOUT) {
state_.store(TIMED_OUT, std::memory_order_release);
return false;
}
uint32_t s = state_.load(std::memory_order_acquire);
assert(s == WAITING || s == LATE_DELIVERY);
if (s == LATE_DELIVERY) {
return true;
}
}
}
/// Similar to timed_wait, but with a duration.
template <typename Clock = std::chrono::steady_clock, typename Duration>
bool timed_wait(const Duration& duration) {
auto deadline = Clock::now() + duration;
return timed_wait(deadline);
}
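/// A short sketch of the timed variants (Clock defaults to steady_clock):
///
///   folly::Baton<> b;
///   if (!b.timed_wait(std::chrono::milliseconds(10))) {
///     // timed out; this consumed the baton's single wait for this lifetime
///   }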
/// Similar to wait, but doesn't block the thread if it hasn't been posted.
///
/// try_wait has the following semantics:
/// - It is ok to call try_wait any number of times on the same baton until
/// try_wait reports that the baton has been posted.
/// - It is ok to call timed_wait or wait on the same baton if try_wait
/// reports that baton hasn't been posted.
/// - If try_wait indicates that the baton has been posted, it is invalid to
/// call wait, try_wait or timed_wait on the same baton without resetting
///
/// @return true if baton has been posted, false otherwise
bool try_wait() {
auto s = state_.load(std::memory_order_acquire);
assert(s == INIT || s == EARLY_DELIVERY);
return s == EARLY_DELIVERY;
}
private:
enum State : uint32_t {
INIT = 0,
EARLY_DELIVERY = 1,
WAITING = 2,
LATE_DELIVERY = 3,
TIMED_OUT = 4
};
enum {
// Must be positive. If multiple threads are actively using a
// higher-level data structure that uses batons internally, it is
// likely that the post() and wait() calls happen almost at the same
// time. In this state, we lose big 50% of the time if the wait goes
// to sleep immediately. On circa-2013 devbox hardware it costs about
// 7 usec to FUTEX_WAIT and then be awoken (half the t/iter as the
// posix_sem_pingpong test in BatonTests). We can improve our chances
// of EARLY_DELIVERY by spinning for a bit, although we have to balance
// this against the loss if we end up sleeping anyway. Spins on this
// hw take about 7 nanos (all but 0.5 nanos is the pause instruction).
// We give ourselves 300 spins, which is about 2 usec of waiting. As a
// partial consolation, since we are using the pause instruction we
// are giving a speed boost to the colocated hyperthread.
PreBlockAttempts = 300,
};
// Spin for "some time" (see discussion on PreBlockAttempts) waiting
// for a post.
//
// @return true if we received an early delivery during the wait,
// false otherwise. If the function returns true then
// state_ is guaranteed to be EARLY_DELIVERY
bool spinWaitForEarlyDelivery() {
static_assert(PreBlockAttempts > 0,
"isn't this assert clearer than an uninitialized variable warning?");
for (int i = 0; i < PreBlockAttempts; ++i) {
if (try_wait()) {
// hooray!
return true;
}
// The pause instruction is the polite way to spin, but it doesn't
// actually affect correctness to omit it if we don't have it.
// Pausing donates the full capabilities of the current core to
// its other hyperthreads for a dozen cycles or so
asm_volatile_pause();
}
return false;
}
detail::Futex<Atom> state_;
};
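/// A minimal handoff sketch (assumes a std::thread-based producer):
///
///   folly::Baton<> done;
///   std::thread worker([&] {
///     // ... produce a result ...
///     done.post();               // exactly one post() per lifetime
///   });
///   done.wait();                 // returns once post() has happened
///   worker.join();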
} // namespace folly

572
ios/Pods/Folly/folly/Benchmark.h generated Normal file
View File

@ -0,0 +1,572 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/Portability.h>
#include <folly/Preprocessor.h> // for FB_ANONYMOUS_VARIABLE
#include <folly/ScopeGuard.h>
#include <folly/Traits.h>
#include <folly/portability/GFlags.h>
#include <folly/portability/Time.h>
#include <cassert>
#include <ctime>
#include <boost/function_types/function_arity.hpp>
#include <functional>
#include <glog/logging.h>
#include <limits>
#include <type_traits>
DECLARE_bool(benchmark);
namespace folly {
/**
* Runs all benchmarks defined. Usually put in main().
*/
void runBenchmarks();
/**
* Runs all benchmarks defined if and only if the --benchmark flag has
* been passed to the program. Usually put in main().
*/
inline bool runBenchmarksOnFlag() {
if (FLAGS_benchmark) {
runBenchmarks();
}
return FLAGS_benchmark;
}
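/**
 * A typical main() sketch (the flag-parsing call is the usual gflags entry
 * point, assumed to be available via folly/portability/GFlags.h; it is not
 * defined in this header):
 *
 *   int main(int argc, char** argv) {
 *     gflags::ParseCommandLineFlags(&argc, &argv, true);
 *     folly::runBenchmarksOnFlag();
 *     return 0;
 *   }
 */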
namespace detail {
typedef std::pair<uint64_t, unsigned int> TimeIterPair;
/**
* Adds a benchmark wrapped in a std::function. Only used
* internally. Pass by value is intentional.
*/
void addBenchmarkImpl(const char* file,
const char* name,
std::function<TimeIterPair(unsigned int)>);
/**
* Takes the difference between two timespec values. end is assumed to
* occur after start.
*/
inline uint64_t timespecDiff(timespec end, timespec start) {
if (end.tv_sec == start.tv_sec) {
assert(end.tv_nsec >= start.tv_nsec);
return end.tv_nsec - start.tv_nsec;
}
assert(end.tv_sec > start.tv_sec);
auto diff = uint64_t(end.tv_sec - start.tv_sec);
assert(diff <
std::numeric_limits<uint64_t>::max() / 1000000000UL);
return diff * 1000000000UL
+ end.tv_nsec - start.tv_nsec;
}
/**
* Takes the difference between two sets of timespec values. The first
* two come from a high-resolution clock whereas the other two come
* from a low-resolution clock. The crux of the matter is that
* high-res values may be bogus as documented in
* http://linux.die.net/man/3/clock_gettime. The trouble is when the
* running process migrates from one CPU to another, which is more
* likely for long-running processes. Therefore we watch for high
* differences between the two timings.
*
* This function is subject to further improvements.
*/
inline uint64_t timespecDiff(timespec end, timespec start,
timespec endCoarse, timespec startCoarse) {
auto fine = timespecDiff(end, start);
auto coarse = timespecDiff(endCoarse, startCoarse);
if (coarse - fine >= 1000000) {
// The fine time is in all likelihood bogus
return coarse;
}
return fine;
}
} // namespace detail
/**
* Supporting type for BENCHMARK_SUSPEND defined below.
*/
struct BenchmarkSuspender {
BenchmarkSuspender() {
CHECK_EQ(0, clock_gettime(CLOCK_REALTIME, &start));
}
BenchmarkSuspender(const BenchmarkSuspender &) = delete;
BenchmarkSuspender(BenchmarkSuspender && rhs) noexcept {
start = rhs.start;
rhs.start.tv_nsec = rhs.start.tv_sec = 0;
}
BenchmarkSuspender& operator=(const BenchmarkSuspender &) = delete;
BenchmarkSuspender& operator=(BenchmarkSuspender && rhs) {
if (start.tv_nsec > 0 || start.tv_sec > 0) {
tally();
}
start = rhs.start;
rhs.start.tv_nsec = rhs.start.tv_sec = 0;
return *this;
}
~BenchmarkSuspender() {
if (start.tv_nsec > 0 || start.tv_sec > 0) {
tally();
}
}
void dismiss() {
assert(start.tv_nsec > 0 || start.tv_sec > 0);
tally();
start.tv_nsec = start.tv_sec = 0;
}
void rehire() {
assert(start.tv_nsec == 0 || start.tv_sec == 0);
CHECK_EQ(0, clock_gettime(CLOCK_REALTIME, &start));
}
template <class F>
auto dismissing(F f) -> typename std::result_of<F()>::type {
SCOPE_EXIT { rehire(); };
dismiss();
return f();
}
/**
* This is for use inside of if-conditions, used in BENCHMARK macros.
* If-conditions bypass the explicit on operator bool.
*/
explicit operator bool() const {
return false;
}
/**
* Accumulates nanoseconds spent outside benchmark.
*/
typedef uint64_t NanosecondsSpent;
static NanosecondsSpent nsSpent;
private:
void tally() {
timespec end;
CHECK_EQ(0, clock_gettime(CLOCK_REALTIME, &end));
nsSpent += detail::timespecDiff(end, start);
start = end;
}
timespec start;
};
/**
* Adds a benchmark. Usually not called directly but instead through
* the macro BENCHMARK defined below. The lambda function involved
* must take exactly one parameter of type unsigned, and the benchmark
* uses it with counter semantics (iteration occurs inside the
* function).
*/
template <typename Lambda>
typename std::enable_if<
boost::function_types::function_arity<decltype(&Lambda::operator())>::value
== 2
>::type
addBenchmark(const char* file, const char* name, Lambda&& lambda) {
auto execute = [=](unsigned int times) {
BenchmarkSuspender::nsSpent = 0;
timespec start, end;
unsigned int niter;
// CORE MEASUREMENT STARTS
auto const r1 = clock_gettime(CLOCK_REALTIME, &start);
niter = lambda(times);
auto const r2 = clock_gettime(CLOCK_REALTIME, &end);
// CORE MEASUREMENT ENDS
CHECK_EQ(0, r1);
CHECK_EQ(0, r2);
return detail::TimeIterPair(
detail::timespecDiff(end, start) - BenchmarkSuspender::nsSpent,
niter);
};
detail::addBenchmarkImpl(file, name,
std::function<detail::TimeIterPair(unsigned int)>(execute));
}
/**
* Adds a benchmark. Usually not called directly but instead through
* the macro BENCHMARK defined below. The lambda function involved
* must take zero parameters, and the benchmark calls it repeatedly
* (iteration occurs outside the function).
*/
template <typename Lambda>
typename std::enable_if<
boost::function_types::function_arity<decltype(&Lambda::operator())>::value
== 1
>::type
addBenchmark(const char* file, const char* name, Lambda&& lambda) {
addBenchmark(file, name, [=](unsigned int times) {
unsigned int niter = 0;
while (times-- > 0) {
niter += lambda();
}
return niter;
});
}
/**
* Call doNotOptimizeAway(var) to ensure that var will be computed even
* post-optimization. Use it for variables that are computed during
* benchmarking but otherwise are useless. The compiler tends to do a
* good job at eliminating unused variables, and this function fools it
* into thinking var is in fact needed.
*
* Call makeUnpredictable(var) when you don't want the optimizer to use
* its knowledge of var to shape the following code. This is useful
* when constant propagation or power reduction is possible during your
* benchmark but not in real use cases.
*/
#ifdef _MSC_VER
#pragma optimize("", off)
inline void doNotOptimizeDependencySink(const void*) {}
#pragma optimize("", on)
template <class T>
void doNotOptimizeAway(const T& datum) {
doNotOptimizeDependencySink(&datum);
}
template <typename T>
void makeUnpredictable(T& datum) {
doNotOptimizeDependencySink(&datum);
}
#else
namespace detail {
template <typename T>
struct DoNotOptimizeAwayNeedsIndirect {
using Decayed = typename std::decay<T>::type;
// First two constraints ensure it can be an "r" operand.
// std::is_pointer check is because callers seem to expect that
// doNotOptimizeAway(&x) is equivalent to doNotOptimizeAway(x).
constexpr static bool value = !folly::IsTriviallyCopyable<Decayed>::value ||
sizeof(Decayed) > sizeof(long) || std::is_pointer<Decayed>::value;
};
} // detail namespace
template <typename T>
auto doNotOptimizeAway(const T& datum) -> typename std::enable_if<
!detail::DoNotOptimizeAwayNeedsIndirect<T>::value>::type {
asm volatile("" ::"X"(datum));
}
template <typename T>
auto doNotOptimizeAway(const T& datum) -> typename std::enable_if<
detail::DoNotOptimizeAwayNeedsIndirect<T>::value>::type {
asm volatile("" ::"m"(datum) : "memory");
}
template <typename T>
auto makeUnpredictable(T& datum) -> typename std::enable_if<
!detail::DoNotOptimizeAwayNeedsIndirect<T>::value>::type {
asm volatile("" : "+r"(datum));
}
template <typename T>
auto makeUnpredictable(T& datum) -> typename std::enable_if<
detail::DoNotOptimizeAwayNeedsIndirect<T>::value>::type {
asm volatile("" ::"m"(datum) : "memory");
}
#endif
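// A minimal sketch of how these helpers are meant to be used inside a
// benchmark body (BENCHMARK is defined below; computeHash() is a
// hypothetical function standing in for the work being measured):
//
//   BENCHMARK(hashBench, n) {
//     size_t seed = 12345;
//     folly::makeUnpredictable(seed);
//     for (unsigned i = 0; i < n; ++i) {
//       folly::doNotOptimizeAway(computeHash(seed + i));
//     }
//   }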
} // namespace folly
/**
* Introduces a benchmark function. Used internally, see BENCHMARK and
* friends below.
*/
#define BENCHMARK_IMPL(funName, stringName, rv, paramType, paramName) \
static void funName(paramType); \
static bool FB_ANONYMOUS_VARIABLE(follyBenchmarkUnused) = ( \
::folly::addBenchmark(__FILE__, stringName, \
[](paramType paramName) -> unsigned { funName(paramName); \
return rv; }), \
true); \
static void funName(paramType paramName)
/**
* Introduces a benchmark function with support for returning the actual
* number of iterations. Used internally, see BENCHMARK_MULTI and friends
* below.
*/
#define BENCHMARK_MULTI_IMPL(funName, stringName, paramType, paramName) \
static unsigned funName(paramType); \
static bool FB_ANONYMOUS_VARIABLE(follyBenchmarkUnused) = ( \
::folly::addBenchmark(__FILE__, stringName, \
[](paramType paramName) { return funName(paramName); }), \
true); \
static unsigned funName(paramType paramName)
/**
* Introduces a benchmark function. Use with either one or two arguments.
* The first is the name of the benchmark. Use something descriptive, such
* as insertVectorBegin. The second argument may be missing, or could be a
 * symbolic counter. The counter dictates how many internal iterations the
* benchmark does. Example:
*
* BENCHMARK(vectorPushBack) {
* vector<int> v;
* v.push_back(42);
* }
*
* BENCHMARK(insertVectorBegin, n) {
* vector<int> v;
* FOR_EACH_RANGE (i, 0, n) {
* v.insert(v.begin(), 42);
* }
* }
*/
#define BENCHMARK(name, ...) \
BENCHMARK_IMPL( \
name, \
FB_STRINGIZE(name), \
FB_ARG_2_OR_1(1, ## __VA_ARGS__), \
FB_ONE_OR_NONE(unsigned, ## __VA_ARGS__), \
__VA_ARGS__)
/**
* Like BENCHMARK above, but allows the user to return the actual
* number of iterations executed in the function body. This can be
* useful if the benchmark function doesn't know upfront how many
* iterations it's going to run or if it runs through a certain
* number of test cases, e.g.:
*
* BENCHMARK_MULTI(benchmarkSomething) {
* std::vector<int> testCases { 0, 1, 1, 2, 3, 5 };
* for (int c : testCases) {
* doSomething(c);
* }
* return testCases.size();
* }
*/
#define BENCHMARK_MULTI(name, ...) \
BENCHMARK_MULTI_IMPL( \
name, \
FB_STRINGIZE(name), \
FB_ONE_OR_NONE(unsigned, ## __VA_ARGS__), \
__VA_ARGS__)
/**
* Defines a benchmark that passes a parameter to another one. This is
* common for benchmarks that need a "problem size" in addition to
* "number of iterations". Consider:
*
* void pushBack(uint n, size_t initialSize) {
* vector<int> v;
* BENCHMARK_SUSPEND {
* v.resize(initialSize);
* }
* FOR_EACH_RANGE (i, 0, n) {
* v.push_back(i);
* }
* }
* BENCHMARK_PARAM(pushBack, 0)
* BENCHMARK_PARAM(pushBack, 1000)
* BENCHMARK_PARAM(pushBack, 1000000)
*
* The benchmark above estimates the speed of push_back at different
* initial sizes of the vector. The framework will pass 0, 1000, and
* 1000000 for initialSize, and the iteration count for n.
*/
#define BENCHMARK_PARAM(name, param) \
BENCHMARK_NAMED_PARAM(name, param, param)
/**
* Same as BENCHMARK_PARAM, but allows one to return the actual number of
* iterations that have been run.
*/
#define BENCHMARK_PARAM_MULTI(name, param) \
BENCHMARK_NAMED_PARAM_MULTI(name, param, param)
/*
* Like BENCHMARK_PARAM(), but allows a custom name to be specified for each
* parameter, rather than using the parameter value.
*
* Useful when the parameter value is not a valid token for string pasting,
 * or when you want to specify multiple parameter arguments.
*
* For example:
*
* void addValue(uint n, int64_t bucketSize, int64_t min, int64_t max) {
* Histogram<int64_t> hist(bucketSize, min, max);
* int64_t num = min;
* FOR_EACH_RANGE (i, 0, n) {
* hist.addValue(num);
* ++num;
* if (num > max) { num = min; }
* }
* }
*
* BENCHMARK_NAMED_PARAM(addValue, 0_to_100, 1, 0, 100)
* BENCHMARK_NAMED_PARAM(addValue, 0_to_1000, 10, 0, 1000)
* BENCHMARK_NAMED_PARAM(addValue, 5k_to_20k, 250, 5000, 20000)
*/
#define BENCHMARK_NAMED_PARAM(name, param_name, ...) \
BENCHMARK_IMPL( \
FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)), \
FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")", \
iters, \
unsigned, \
iters) { \
name(iters, ## __VA_ARGS__); \
}
/**
* Same as BENCHMARK_NAMED_PARAM, but allows one to return the actual number
* of iterations that have been run.
*/
#define BENCHMARK_NAMED_PARAM_MULTI(name, param_name, ...) \
BENCHMARK_MULTI_IMPL( \
FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)), \
FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")", \
unsigned, \
iters) { \
return name(iters, ## __VA_ARGS__); \
}
/**
* Just like BENCHMARK, but prints the time relative to a
* baseline. The baseline is the most recent BENCHMARK() seen in
* the current scope. Example:
*
* // This is the baseline
* BENCHMARK(insertVectorBegin, n) {
* vector<int> v;
* FOR_EACH_RANGE (i, 0, n) {
* v.insert(v.begin(), 42);
* }
* }
*
* BENCHMARK_RELATIVE(insertListBegin, n) {
* list<int> s;
* FOR_EACH_RANGE (i, 0, n) {
* s.insert(s.begin(), 42);
* }
* }
*
 * Any number of relative benchmarks can be associated with a
* baseline. Another BENCHMARK() occurrence effectively establishes a
* new baseline.
*/
#define BENCHMARK_RELATIVE(name, ...) \
BENCHMARK_IMPL( \
name, \
"%" FB_STRINGIZE(name), \
FB_ARG_2_OR_1(1, ## __VA_ARGS__), \
FB_ONE_OR_NONE(unsigned, ## __VA_ARGS__), \
__VA_ARGS__)
/**
* Same as BENCHMARK_RELATIVE, but allows one to return the actual number
* of iterations that have been run.
*/
#define BENCHMARK_RELATIVE_MULTI(name, ...) \
BENCHMARK_MULTI_IMPL( \
name, \
"%" FB_STRINGIZE(name), \
FB_ONE_OR_NONE(unsigned, ## __VA_ARGS__), \
__VA_ARGS__)
/**
* A combination of BENCHMARK_RELATIVE and BENCHMARK_PARAM.
*/
#define BENCHMARK_RELATIVE_PARAM(name, param) \
BENCHMARK_RELATIVE_NAMED_PARAM(name, param, param)
/**
* Same as BENCHMARK_RELATIVE_PARAM, but allows one to return the actual
* number of iterations that have been run.
*/
#define BENCHMARK_RELATIVE_PARAM_MULTI(name, param) \
BENCHMARK_RELATIVE_NAMED_PARAM_MULTI(name, param, param)
/**
* A combination of BENCHMARK_RELATIVE and BENCHMARK_NAMED_PARAM.
*/
#define BENCHMARK_RELATIVE_NAMED_PARAM(name, param_name, ...) \
BENCHMARK_IMPL( \
FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)), \
"%" FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")", \
iters, \
unsigned, \
iters) { \
name(iters, ## __VA_ARGS__); \
}
/**
* Same as BENCHMARK_RELATIVE_NAMED_PARAM, but allows one to return the
* actual number of iterations that have been run.
*/
#define BENCHMARK_RELATIVE_NAMED_PARAM_MULTI(name, param_name, ...) \
BENCHMARK_MULTI_IMPL( \
FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)), \
"%" FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")", \
unsigned, \
iters) { \
return name(iters, ## __VA_ARGS__); \
}
/**
* Draws a line of dashes.
*/
#define BENCHMARK_DRAW_LINE() \
static bool FB_ANONYMOUS_VARIABLE(follyBenchmarkUnused) = ( \
::folly::addBenchmark(__FILE__, "-", []() -> unsigned { return 0; }), \
true);
/**
 * Allows execution of code that doesn't count toward the benchmark's
* time budget. Example:
*
 * BENCHMARK(insertVectorBegin, n) {
* vector<int> v;
* BENCHMARK_SUSPEND {
* v.reserve(n);
* }
* FOR_EACH_RANGE (i, 0, n) {
* v.insert(v.begin(), 42);
* }
* }
*/
#define BENCHMARK_SUSPEND \
if (auto FB_ANONYMOUS_VARIABLE(BENCHMARK_SUSPEND) = \
::folly::BenchmarkSuspender()) {} \
else

93
ios/Pods/Folly/folly/Bits.cpp generated Normal file
View File

@ -0,0 +1,93 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <folly/Bits.h>
#include <folly/CpuId.h>
#include <folly/Portability.h>
// None of this is necessary if we're compiling for a target that supports
// popcnt, which includes MSVC
#if !defined(__POPCNT__) && !defined(_MSC_VER)
namespace {
int popcount_builtin(unsigned int x) {
return __builtin_popcount(x);
}
int popcountll_builtin(unsigned long long x) {
return __builtin_popcountll(x);
}
#if FOLLY_HAVE_IFUNC && !defined(FOLLY_SANITIZE_ADDRESS)
// Strictly speaking, these versions of popcount are usable without ifunc
// support. However, we would have to check, via CpuId, if the processor
// implements the popcnt instruction first, which is what we use ifunc for.
int popcount_inst(unsigned int x) {
int n;
asm ("popcntl %1, %0" : "=r" (n) : "r" (x));
return n;
}
int popcountll_inst(unsigned long long x) {
unsigned long long n;
asm ("popcntq %1, %0" : "=r" (n) : "r" (x));
return n;
}
typedef decltype(popcount_builtin) Type_popcount;
typedef decltype(popcountll_builtin) Type_popcountll;
// This function is called on startup to resolve folly::detail::popcount
extern "C" Type_popcount* folly_popcount_ifunc() {
return folly::CpuId().popcnt() ? popcount_inst : popcount_builtin;
}
// This function is called on startup to resolve folly::detail::popcountll
extern "C" Type_popcountll* folly_popcountll_ifunc() {
return folly::CpuId().popcnt() ? popcountll_inst : popcountll_builtin;
}
#endif // FOLLY_HAVE_IFUNC && !defined(FOLLY_SANITIZE_ADDRESS)
} // namespace
namespace folly {
namespace detail {
// Call folly_popcount_ifunc on startup to resolve to either popcount_inst
// or popcount_builtin
int popcount(unsigned int x)
#if FOLLY_HAVE_IFUNC && !defined(FOLLY_SANITIZE_ADDRESS)
__attribute__((__ifunc__("folly_popcount_ifunc")));
#else
{ return popcount_builtin(x); }
#endif
// Call folly_popcount_ifunc on startup to resolve to either popcountll_inst
// or popcountll_builtin
int popcountll(unsigned long long x)
#if FOLLY_HAVE_IFUNC && !defined(FOLLY_SANITIZE_ADDRESS)
__attribute__((__ifunc__("folly_popcountll_ifunc")));
#else
{ return popcountll_builtin(x); }
#endif
} // namespace detail
} // namespace folly
#endif /* !__POPCNT__ */

571
ios/Pods/Folly/folly/Bits.h generated Normal file
View File

@ -0,0 +1,571 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Various low-level, bit-manipulation routines.
*
* findFirstSet(x) [constexpr]
* find first (least significant) bit set in a value of an integral type,
* 1-based (like ffs()). 0 = no bits are set (x == 0)
*
* findLastSet(x) [constexpr]
* find last (most significant) bit set in a value of an integral type,
* 1-based. 0 = no bits are set (x == 0)
* for x != 0, findLastSet(x) == 1 + floor(log2(x))
*
* nextPowTwo(x) [constexpr]
* Finds the next power of two >= x.
*
* isPowTwo(x) [constexpr]
* return true iff x is a power of two
*
* popcount(x)
* return the number of 1 bits in x
*
* Endian
* convert between native, big, and little endian representation
* Endian::big(x) big <-> native
* Endian::little(x) little <-> native
* Endian::swap(x) big <-> little
*
* BitIterator
* Wrapper around an iterator over an integral type that iterates
 * over its underlying bits in LSb to MSb order
*
* findFirstSet(BitIterator begin, BitIterator end)
* return a BitIterator pointing to the first 1 bit in [begin, end), or
* end if all bits in [begin, end) are 0
*
* @author Tudor Bosman (tudorb@fb.com)
*/
#pragma once
#if !defined(__clang__) && !(defined(_MSC_VER) && (_MSC_VER < 1900))
#define FOLLY_INTRINSIC_CONSTEXPR constexpr
#else
// GCC and MSVC 2015+ are the only compilers with
// intrinsics constexpr.
#define FOLLY_INTRINSIC_CONSTEXPR const
#endif
#include <folly/Portability.h>
#include <folly/portability/Builtins.h>
#include <folly/Assume.h>
#include <folly/detail/BitsDetail.h>
#include <folly/detail/BitIteratorDetail.h>
#include <folly/Likely.h>
#if FOLLY_HAVE_BYTESWAP_H
# include <byteswap.h>
#endif
#include <cassert>
#include <cinttypes>
#include <iterator>
#include <limits>
#include <type_traits>
#include <boost/iterator/iterator_adaptor.hpp>
#include <stdint.h>
namespace folly {
// Generate overloads for findFirstSet as wrappers around
// appropriate ffs, ffsl, ffsll gcc builtins
template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
(std::is_integral<T>::value &&
std::is_unsigned<T>::value &&
sizeof(T) <= sizeof(unsigned int)),
unsigned int>::type
findFirstSet(T x) {
return __builtin_ffs(x);
}
template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
(std::is_integral<T>::value &&
std::is_unsigned<T>::value &&
sizeof(T) > sizeof(unsigned int) &&
sizeof(T) <= sizeof(unsigned long)),
unsigned int>::type
findFirstSet(T x) {
return __builtin_ffsl(x);
}
template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
(std::is_integral<T>::value &&
std::is_unsigned<T>::value &&
sizeof(T) > sizeof(unsigned long) &&
sizeof(T) <= sizeof(unsigned long long)),
unsigned int>::type
findFirstSet(T x) {
return __builtin_ffsll(x);
}
template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
(std::is_integral<T>::value && std::is_signed<T>::value),
unsigned int>::type
findFirstSet(T x) {
// Note that conversion from a signed type to the corresponding unsigned
// type is technically implementation-defined, but will likely work
// on any implementation that uses two's complement.
return findFirstSet(static_cast<typename std::make_unsigned<T>::type>(x));
}
// findLastSet: return the 1-based index of the highest bit set
// for x > 0, findLastSet(x) == 1 + floor(log2(x))
template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
(std::is_integral<T>::value &&
std::is_unsigned<T>::value &&
sizeof(T) <= sizeof(unsigned int)),
unsigned int>::type
findLastSet(T x) {
// If X is a power of two X - Y = ((X - 1) ^ Y) + 1. Doing this transformation
// allows GCC to remove its own xor that it adds to implement clz using bsr
return x ? ((8 * sizeof(unsigned int) - 1) ^ __builtin_clz(x)) + 1 : 0;
}
template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
(std::is_integral<T>::value &&
std::is_unsigned<T>::value &&
sizeof(T) > sizeof(unsigned int) &&
sizeof(T) <= sizeof(unsigned long)),
unsigned int>::type
findLastSet(T x) {
return x ? ((8 * sizeof(unsigned long) - 1) ^ __builtin_clzl(x)) + 1 : 0;
}
template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
(std::is_integral<T>::value &&
std::is_unsigned<T>::value &&
sizeof(T) > sizeof(unsigned long) &&
sizeof(T) <= sizeof(unsigned long long)),
unsigned int>::type
findLastSet(T x) {
return x ? ((8 * sizeof(unsigned long long) - 1) ^ __builtin_clzll(x)) + 1
: 0;
}
template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
(std::is_integral<T>::value &&
std::is_signed<T>::value),
unsigned int>::type
findLastSet(T x) {
return findLastSet(static_cast<typename std::make_unsigned<T>::type>(x));
}
template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
std::is_integral<T>::value && std::is_unsigned<T>::value,
T>::type
nextPowTwo(T v) {
return v ? (T(1) << findLastSet(v - 1)) : 1;
}
template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR typename std::
enable_if<std::is_integral<T>::value && std::is_unsigned<T>::value, T>::type
prevPowTwo(T v) {
return v ? (T(1) << (findLastSet(v) - 1)) : 0;
}
template <class T>
inline constexpr typename std::enable_if<
std::is_integral<T>::value && std::is_unsigned<T>::value,
bool>::type
isPowTwo(T v) {
return (v != 0) && !(v & (v - 1));
}
/**
* Population count
*/
template <class T>
inline typename std::enable_if<
(std::is_integral<T>::value &&
std::is_unsigned<T>::value &&
sizeof(T) <= sizeof(unsigned int)),
size_t>::type
popcount(T x) {
return detail::popcount(x);
}
template <class T>
inline typename std::enable_if<
(std::is_integral<T>::value &&
std::is_unsigned<T>::value &&
sizeof(T) > sizeof(unsigned int) &&
sizeof(T) <= sizeof(unsigned long long)),
size_t>::type
popcount(T x) {
return detail::popcountll(x);
}
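// A small sketch of the helpers above (the commented values are what these
// calls return for the given inputs):
//
//   folly::findFirstSet(8u);    // 4 (1-based, like ffs())
//   folly::findLastSet(8u);     // 4 == 1 + floor(log2(8))
//   folly::nextPowTwo(17u);     // 32
//   folly::isPowTwo(64u);       // true
//   folly::popcount(0xFFu);     // 8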
/**
* Endianness detection and manipulation primitives.
*/
namespace detail {
template <class T>
struct EndianIntBase {
public:
static T swap(T x);
};
#ifndef _MSC_VER
/**
* If we have the bswap_16 macro from byteswap.h, use it; otherwise, provide our
* own definition.
*/
#ifdef bswap_16
# define our_bswap16 bswap_16
#else
template<class Int16>
inline constexpr typename std::enable_if<
sizeof(Int16) == 2,
Int16>::type
our_bswap16(Int16 x) {
return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
}
#endif
#endif
#define FB_GEN(t, fn) \
template<> inline t EndianIntBase<t>::swap(t x) { return fn(x); }
// fn(x) expands to (x) if the second argument is empty, which is exactly
// what we want for [u]int8_t. Also, gcc 4.7 on Intel doesn't have
// __builtin_bswap16 for some reason, so we have to provide our own.
FB_GEN( int8_t,)
FB_GEN(uint8_t,)
#ifdef _MSC_VER
FB_GEN( int64_t, _byteswap_uint64)
FB_GEN(uint64_t, _byteswap_uint64)
FB_GEN( int32_t, _byteswap_ulong)
FB_GEN(uint32_t, _byteswap_ulong)
FB_GEN( int16_t, _byteswap_ushort)
FB_GEN(uint16_t, _byteswap_ushort)
#else
FB_GEN( int64_t, __builtin_bswap64)
FB_GEN(uint64_t, __builtin_bswap64)
FB_GEN( int32_t, __builtin_bswap32)
FB_GEN(uint32_t, __builtin_bswap32)
FB_GEN( int16_t, our_bswap16)
FB_GEN(uint16_t, our_bswap16)
#endif
#undef FB_GEN
template <class T>
struct EndianInt : public EndianIntBase<T> {
public:
static T big(T x) {
return kIsLittleEndian ? EndianInt::swap(x) : x;
}
static T little(T x) {
return kIsBigEndian ? EndianInt::swap(x) : x;
}
};
} // namespace detail
// big* convert between native and big-endian representations
// little* convert between native and little-endian representations
// swap* convert between big-endian and little-endian representations
//
// ntohs, htons == big16
// ntohl, htonl == big32
#define FB_GEN1(fn, t, sz) \
static t fn##sz(t x) { return fn<t>(x); }
#define FB_GEN2(t, sz) \
FB_GEN1(swap, t, sz) \
FB_GEN1(big, t, sz) \
FB_GEN1(little, t, sz)
#define FB_GEN(sz) \
FB_GEN2(uint##sz##_t, sz) \
FB_GEN2(int##sz##_t, sz)
class Endian {
public:
enum class Order : uint8_t {
LITTLE,
BIG
};
static constexpr Order order = kIsLittleEndian ? Order::LITTLE : Order::BIG;
template <class T> static T swap(T x) {
return folly::detail::EndianInt<T>::swap(x);
}
template <class T> static T big(T x) {
return folly::detail::EndianInt<T>::big(x);
}
template <class T> static T little(T x) {
return folly::detail::EndianInt<T>::little(x);
}
#if !defined(__ANDROID__)
FB_GEN(64)
FB_GEN(32)
FB_GEN(16)
FB_GEN(8)
#endif
};
#undef FB_GEN
#undef FB_GEN2
#undef FB_GEN1
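// A minimal sketch of Endian in use (the commented value assumes a
// little-endian host):
//
//   uint32_t native = 0x11223344;
//   uint32_t wire = folly::Endian::big(native);  // 0x44332211 on LE hosts
//   assert(folly::Endian::big(wire) == native);  // big() is its own inverse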
/**
* Fast bit iteration facility.
*/
template <class BaseIter> class BitIterator;
template <class BaseIter>
BitIterator<BaseIter> findFirstSet(BitIterator<BaseIter>,
BitIterator<BaseIter>);
/**
* Wrapper around an iterator over an integer type that iterates
* over its underlying bits in LSb to MSb order.
*
* BitIterator models the same iterator concepts as the base iterator.
*/
template <class BaseIter>
class BitIterator
: public bititerator_detail::BitIteratorBase<BaseIter>::type {
public:
/**
* Return the number of bits in an element of the underlying iterator.
*/
static unsigned int bitsPerBlock() {
return std::numeric_limits<
typename std::make_unsigned<
typename std::iterator_traits<BaseIter>::value_type
>::type
>::digits;
}
/**
* Construct a BitIterator that points at a given bit offset (default 0)
* in iter.
*/
explicit BitIterator(const BaseIter& iter, size_t bitOff=0)
: bititerator_detail::BitIteratorBase<BaseIter>::type(iter),
bitOffset_(bitOff) {
assert(bitOffset_ < bitsPerBlock());
}
size_t bitOffset() const {
return bitOffset_;
}
void advanceToNextBlock() {
bitOffset_ = 0;
++this->base_reference();
}
BitIterator& operator=(const BaseIter& other) {
this->~BitIterator();
new (this) BitIterator(other);
return *this;
}
private:
friend class boost::iterator_core_access;
friend BitIterator findFirstSet<>(BitIterator, BitIterator);
typedef bititerator_detail::BitReference<
typename std::iterator_traits<BaseIter>::reference,
typename std::iterator_traits<BaseIter>::value_type
> BitRef;
void advanceInBlock(size_t n) {
bitOffset_ += n;
assert(bitOffset_ < bitsPerBlock());
}
BitRef dereference() const {
return BitRef(*this->base_reference(), bitOffset_);
}
void advance(ssize_t n) {
size_t bpb = bitsPerBlock();
ssize_t blocks = n / bpb;
bitOffset_ += n % bpb;
if (bitOffset_ >= bpb) {
bitOffset_ -= bpb;
++blocks;
}
this->base_reference() += blocks;
}
void increment() {
if (++bitOffset_ == bitsPerBlock()) {
advanceToNextBlock();
}
}
void decrement() {
if (bitOffset_-- == 0) {
bitOffset_ = bitsPerBlock() - 1;
--this->base_reference();
}
}
bool equal(const BitIterator& other) const {
return (bitOffset_ == other.bitOffset_ &&
this->base_reference() == other.base_reference());
}
ssize_t distance_to(const BitIterator& other) const {
return
(other.base_reference() - this->base_reference()) * bitsPerBlock() +
other.bitOffset_ - bitOffset_;
}
unsigned int bitOffset_;
};
/**
* Helper function, so you can write
* auto bi = makeBitIterator(container.begin());
*/
template <class BaseIter>
BitIterator<BaseIter> makeBitIterator(const BaseIter& iter) {
return BitIterator<BaseIter>(iter);
}
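// A short sketch using makeBitIterator over a vector of 64-bit blocks
// (the range overload of findFirstSet is defined just below):
//
//   std::vector<uint64_t> blocks(4, 0);
//   blocks[2] = 1ULL << 5;
//   auto begin = folly::makeBitIterator(blocks.begin());
//   auto end = folly::makeBitIterator(blocks.end());
//   auto it = folly::findFirstSet(begin, end);   // bit 5 of blocks[2]
//   size_t pos = it - begin;                     // 2 * 64 + 5 == 133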
/**
* Find first bit set in a range of bit iterators.
* 4.5x faster than the obvious std::find(begin, end, true);
*/
template <class BaseIter>
BitIterator<BaseIter> findFirstSet(BitIterator<BaseIter> begin,
BitIterator<BaseIter> end) {
// shortcut to avoid ugly static_cast<>
static const typename BaseIter::value_type one = 1;
while (begin.base() != end.base()) {
typename BaseIter::value_type v = *begin.base();
// mask out the bits that don't matter (< begin.bitOffset)
v &= ~((one << begin.bitOffset()) - 1);
size_t firstSet = findFirstSet(v);
if (firstSet) {
--firstSet; // now it's 0-based
assert(firstSet >= begin.bitOffset());
begin.advanceInBlock(firstSet - begin.bitOffset());
return begin;
}
begin.advanceToNextBlock();
}
// now begin points to the same block as end
if (end.bitOffset() != 0) { // assume end is dereferenceable
typename BaseIter::value_type v = *begin.base();
// mask out the bits that don't matter (< begin.bitOffset)
v &= ~((one << begin.bitOffset()) - 1);
// mask out the bits that don't matter (>= end.bitOffset)
v &= (one << end.bitOffset()) - 1;
size_t firstSet = findFirstSet(v);
if (firstSet) {
--firstSet; // now it's 0-based
assert(firstSet >= begin.bitOffset());
begin.advanceInBlock(firstSet - begin.bitOffset());
return begin;
}
}
return end;
}
template <class T, class Enable=void> struct Unaligned;
/**
* Representation of an unaligned value of a POD type.
*/
FOLLY_PACK_PUSH
template <class T>
struct Unaligned<
T,
typename std::enable_if<std::is_pod<T>::value>::type> {
Unaligned() = default; // uninitialized
/* implicit */ Unaligned(T v) : value(v) { }
T value;
} FOLLY_PACK_ATTR;
FOLLY_PACK_POP
/**
* Read an unaligned value of type T and return it.
*/
template <class T>
inline T loadUnaligned(const void* p) {
static_assert(sizeof(Unaligned<T>) == sizeof(T), "Invalid unaligned size");
static_assert(alignof(Unaligned<T>) == 1, "Invalid alignment");
if (kHasUnalignedAccess) {
return static_cast<const Unaligned<T>*>(p)->value;
} else {
T value;
memcpy(&value, p, sizeof(T));
return value;
}
}
/**
* Write an unaligned value of type T.
*/
template <class T>
inline void storeUnaligned(void* p, T value) {
static_assert(sizeof(Unaligned<T>) == sizeof(T), "Invalid unaligned size");
static_assert(alignof(Unaligned<T>) == 1, "Invalid alignment");
if (kHasUnalignedAccess) {
// Prior to C++14, the spec says that a placement new like this
// is required to check that p is not nullptr, and to do nothing
// if p is a nullptr. By assuming it's not a nullptr, we get a
// nice loud segfault in optimized builds if p is nullptr, rather
// than just silently doing nothing.
folly::assume(p != nullptr);
new (p) Unaligned<T>(value);
} else {
memcpy(p, &value, sizeof(T));
}
}
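// A small sketch of the unaligned helpers (the offset of 1 forces an
// unaligned access on purpose):
//
//   char buf[16] = {};
//   folly::storeUnaligned<uint32_t>(buf + 1, 0xdeadbeefU);
//   uint32_t v = folly::loadUnaligned<uint32_t>(buf + 1);  // 0xdeadbeef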
} // namespace folly

109
ios/Pods/Folly/folly/CPortability.h generated Normal file
View File

@ -0,0 +1,109 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
/* These definitions are in a separate file so that they
* may be included from C- as well as C++-based projects. */
/**
* Portable version check.
*/
#ifndef __GNUC_PREREQ
# if defined __GNUC__ && defined __GNUC_MINOR__
/* nolint */
# define __GNUC_PREREQ(maj, min) ((__GNUC__ << 16) + __GNUC_MINOR__ >= \
((maj) << 16) + (min))
# else
/* nolint */
# define __GNUC_PREREQ(maj, min) 0
# endif
#endif
/* Define a convenience macro to test when address sanitizer is being used
* across the different compilers (e.g. clang, gcc) */
#if defined(__clang__)
# if __has_feature(address_sanitizer)
# define FOLLY_SANITIZE_ADDRESS 1
# endif
#elif defined (__GNUC__) && \
(((__GNUC__ == 4) && (__GNUC_MINOR__ >= 8)) || (__GNUC__ >= 5)) && \
__SANITIZE_ADDRESS__
# define FOLLY_SANITIZE_ADDRESS 1
#endif
/* Define attribute wrapper for function attribute used to disable
* address sanitizer instrumentation. Unfortunately, this attribute
* has issues when inlining is used, so disable that as well. */
#ifdef FOLLY_SANITIZE_ADDRESS
# if defined(__clang__)
# if __has_attribute(__no_sanitize__)
# define FOLLY_DISABLE_ADDRESS_SANITIZER \
__attribute__((__no_sanitize__("address"), __noinline__))
# elif __has_attribute(__no_address_safety_analysis__)
# define FOLLY_DISABLE_ADDRESS_SANITIZER \
__attribute__((__no_address_safety_analysis__, __noinline__))
# elif __has_attribute(__no_sanitize_address__)
# define FOLLY_DISABLE_ADDRESS_SANITIZER \
__attribute__((__no_sanitize_address__, __noinline__))
# endif
# elif defined(__GNUC__)
# define FOLLY_DISABLE_ADDRESS_SANITIZER \
__attribute__((__no_address_safety_analysis__, __noinline__))
# endif
#endif
#ifndef FOLLY_DISABLE_ADDRESS_SANITIZER
# define FOLLY_DISABLE_ADDRESS_SANITIZER
#endif
/* Define a convenience macro to test when thread sanitizer is being used
* across the different compilers (e.g. clang, gcc) */
#if defined(__clang__)
# if __has_feature(thread_sanitizer)
# define FOLLY_SANITIZE_THREAD 1
# endif
#elif defined(__GNUC__) && __SANITIZE_THREAD__
# define FOLLY_SANITIZE_THREAD 1
#endif
/**
* ASAN/MSAN/TSAN define pre-processor symbols:
* ADDRESS_SANITIZER/MEMORY_SANITIZER/THREAD_SANITIZER.
*
* UBSAN doesn't define anything and makes it hard to
* conditionally compile.
*
* The build system should define UNDEFINED_SANITIZER=1 when UBSAN is
 * used, as folly whitelists some functions.
*/
#if UNDEFINED_SANITIZER
# define UBSAN_DISABLE(x) __attribute__((no_sanitize(x)))
#else
# define UBSAN_DISABLE(x)
#endif // UNDEFINED_SANITIZER
/**
* Macro for marking functions as having public visibility.
*/
#if defined(__GNUC__)
# if __GNUC_PREREQ(4, 9)
# define FOLLY_EXPORT [[gnu::visibility("default")]]
# else
# define FOLLY_EXPORT __attribute__((__visibility__("default")))
# endif
#else
# define FOLLY_EXPORT
#endif
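/**
 * A minimal sketch of the attribute macros above in use (the function names
 * are illustrative only):
 *
 *   FOLLY_EXPORT void publicEntryPoint();
 *   FOLLY_DISABLE_ADDRESS_SANITIZER void touchesPoisonedMemory();
 */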

82
ios/Pods/Folly/folly/CachelinePadded.h generated Normal file
View File

@ -0,0 +1,82 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/detail/CachelinePaddedImpl.h>
namespace folly {
/**
* Holds a type T, in addition to enough padding to round the size up to the
* next multiple of the false sharing range used by folly.
*
* If T is standard-layout, then casting a T* you get from this class to a
* CachelinePadded<T>* is safe.
*
* This class handles padding, but imperfectly handles alignment. (Note that
* alignment matters for false-sharing: imagine a cacheline size of 64, and two
* adjacent 64-byte objects, with the first starting at an offset of 32. The
* last 32 bytes of the first object share a cacheline with the first 32 bytes
* of the second.). We alignas this class to be at least cacheline-sized, but
* it's implementation-defined what that means (since a cacheline is almost
* certainly larger than the maximum natural alignment). The following should be
* true for recent compilers on common architectures:
*
* For heap objects, alignment needs to be handled at the allocator level, such
* as with posix_memalign (this isn't necessary with jemalloc, which aligns
* objects that are a multiple of cacheline size to a cacheline).
*
* For static and stack objects, the alignment should be obeyed, and no specific
* intervention is necessary.
*/
template <typename T>
class CachelinePadded {
public:
template <typename... Args>
explicit CachelinePadded(Args&&... args)
: impl_(std::forward<Args>(args)...) {}
CachelinePadded() {}
T* get() {
return &impl_.item;
}
const T* get() const {
return &impl_.item;
}
T* operator->() {
return get();
}
const T* operator->() const {
return get();
}
T& operator*() {
return *get();
}
const T& operator*() const {
return *get();
}
private:
detail::CachelinePaddedImpl<T> impl_;
};
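// A minimal usage sketch (the padded wrapper is typically placed in an
// array or struct shared between threads):
//
//   folly::CachelinePadded<std::atomic<uint64_t>> counter;
//   counter->store(0);                // operator-> forwards to the wrapped T
//   counter->fetch_add(1);
//   uint64_t n = (*counter).load();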
}

79
ios/Pods/Folly/folly/CallOnce.h generated Normal file
View File

@ -0,0 +1,79 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Drop-in replacement for std::call_once() with a fast path, which the GCC
* implementation lacks. The tradeoff is a slightly larger `once_flag' struct
* (8 bytes vs 4 bytes with GCC on Linux/x64).
*
* $ call_once_test --benchmark --bm_min_iters=100000000 --threads=16
* ============================================================================
* folly/test/CallOnceTest.cpp relative time/iter iters/s
* ============================================================================
* StdCallOnceBench 3.54ns 282.82M
* FollyCallOnceBench 698.48ps 1.43G
* ============================================================================
*/
#pragma once
#include <atomic>
#include <mutex>
#include <utility>
#include <folly/Likely.h>
#include <folly/Portability.h>
namespace folly {
class once_flag {
public:
constexpr once_flag() noexcept = default;
once_flag(const once_flag&) = delete;
once_flag& operator=(const once_flag&) = delete;
template <typename Callable, class... Args>
friend void call_once(once_flag& flag, Callable&& f, Args&&... args);
template <typename Callable, class... Args>
friend void call_once_impl_no_inline(once_flag& flag,
Callable&& f,
Args&&... args);
private:
std::atomic<bool> called_{false};
std::once_flag std_once_flag_;
};
template <class Callable, class... Args>
void FOLLY_ALWAYS_INLINE
call_once(once_flag& flag, Callable&& f, Args&&... args) {
if (LIKELY(flag.called_.load(std::memory_order_acquire))) {
return;
}
call_once_impl_no_inline(
flag, std::forward<Callable>(f), std::forward<Args>(args)...);
}
// Implementation detail: out-of-line slow path
template <class Callable, class... Args>
void FOLLY_NOINLINE
call_once_impl_no_inline(once_flag& flag, Callable&& f, Args&&... args) {
std::call_once(flag.std_once_flag_,
std::forward<Callable>(f),
std::forward<Args>(args)...);
flag.called_.store(true, std::memory_order_release);
}
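// A minimal usage sketch (drop-in replacement for std::call_once):
//
//   static folly::once_flag initFlag;
//   void ensureInit() {
//     folly::call_once(initFlag, [] { /* one-time setup */ });
//   }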
}

40
ios/Pods/Folly/folly/Checksum.h generated Normal file
View File

@ -0,0 +1,40 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <stdint.h>
#include <cstddef>
/*
* Checksum functions
*/
namespace folly {
/**
* Compute the CRC-32C checksum of a buffer, using a hardware-accelerated
* implementation if available or a portable software implementation as
* a default.
*
* @note CRC-32C is different from CRC-32; CRC-32C starts with a different
* polynomial and thus yields different results for the same input
* than a traditional CRC-32.
*/
uint32_t crc32c(const uint8_t* data, size_t nbytes,
uint32_t startingChecksum = ~0U);
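// A short sketch (uses the default starting checksum of ~0U):
//
//   const uint8_t data[] = {'f', 'o', 'l', 'l', 'y'};
//   uint32_t crc = folly::crc32c(data, sizeof(data));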
} // folly

View File

@ -0,0 +1,29 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/portability/Time.h>
#include <time.h>
namespace folly {
namespace chrono {
extern int (*clock_gettime)(clockid_t, timespec* ts);
extern int64_t (*clock_gettime_ns)(clockid_t);
}
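// A minimal sketch of the replaceable clock hooks above (they mirror the
// POSIX clock_gettime interface):
//
//   timespec ts;
//   folly::chrono::clock_gettime(CLOCK_MONOTONIC, &ts);
//   int64_t nowNs = folly::chrono::clock_gettime_ns(CLOCK_MONOTONIC);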
}

View File

@ -0,0 +1,334 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// @author: Xin Liu <xliux@fb.com>
#pragma once
#include <algorithm>
#include <atomic>
#include <climits>
#include <cmath>
#include <memory>
#include <mutex>
#include <type_traits>
#include <vector>
#include <boost/noncopyable.hpp>
#include <boost/random.hpp>
#include <boost/type_traits.hpp>
#include <glog/logging.h>
#include <folly/Memory.h>
#include <folly/MicroSpinLock.h>
#include <folly/ThreadLocal.h>
namespace folly { namespace detail {
template<typename ValT, typename NodeT> class csl_iterator;
template<typename T>
class SkipListNode : private boost::noncopyable {
enum {
IS_HEAD_NODE = 1,
MARKED_FOR_REMOVAL = (1 << 1),
FULLY_LINKED = (1 << 2),
};
public:
typedef T value_type;
template<typename NodeAlloc, typename U,
typename=typename std::enable_if<std::is_convertible<U, T>::value>::type>
static SkipListNode* create(
NodeAlloc& alloc, int height, U&& data, bool isHead = false) {
DCHECK(height >= 1 && height < 64) << height;
size_t size = sizeof(SkipListNode) +
height * sizeof(std::atomic<SkipListNode*>);
auto* node = static_cast<SkipListNode*>(alloc.allocate(size));
// do placement new
new (node) SkipListNode(height, std::forward<U>(data), isHead);
return node;
}
template<typename NodeAlloc>
static void destroy(NodeAlloc& alloc, SkipListNode* node) {
node->~SkipListNode();
alloc.deallocate(node);
}
template<typename NodeAlloc>
static constexpr bool destroyIsNoOp() {
return IsArenaAllocator<NodeAlloc>::value &&
boost::has_trivial_destructor<SkipListNode>::value;
}
// copy the head node to a new head node assuming lock acquired
SkipListNode* copyHead(SkipListNode* node) {
DCHECK(node != nullptr && height_ > node->height_);
setFlags(node->getFlags());
for (int i = 0; i < node->height_; ++i) {
setSkip(i, node->skip(i));
}
return this;
}
inline SkipListNode* skip(int layer) const {
DCHECK_LT(layer, height_);
return skip_[layer].load(std::memory_order_consume);
}
// next valid node as in the linked list
SkipListNode* next() {
SkipListNode* node;
for (node = skip(0);
(node != nullptr && node->markedForRemoval());
node = node->skip(0)) {}
return node;
}
void setSkip(uint8_t h, SkipListNode* next) {
DCHECK_LT(h, height_);
skip_[h].store(next, std::memory_order_release);
}
value_type& data() { return data_; }
const value_type& data() const { return data_; }
int maxLayer() const { return height_ - 1; }
int height() const { return height_; }
std::unique_lock<MicroSpinLock> acquireGuard() {
return std::unique_lock<MicroSpinLock>(spinLock_);
}
bool fullyLinked() const { return getFlags() & FULLY_LINKED; }
bool markedForRemoval() const { return getFlags() & MARKED_FOR_REMOVAL; }
bool isHeadNode() const { return getFlags() & IS_HEAD_NODE; }
void setIsHeadNode() {
setFlags(getFlags() | IS_HEAD_NODE);
}
void setFullyLinked() {
setFlags(getFlags() | FULLY_LINKED);
}
void setMarkedForRemoval() {
setFlags(getFlags() | MARKED_FOR_REMOVAL);
}
private:
// Note! this can only be called from create() as a placement new.
template<typename U>
SkipListNode(uint8_t height, U&& data, bool isHead) :
height_(height), data_(std::forward<U>(data)) {
spinLock_.init();
setFlags(0);
if (isHead) setIsHeadNode();
// need to explicitly init the dynamic atomic pointer array
for (uint8_t i = 0; i < height_; ++i) {
new (&skip_[i]) std::atomic<SkipListNode*>(nullptr);
}
}
~SkipListNode() {
for (uint8_t i = 0; i < height_; ++i) {
skip_[i].~atomic();
}
}
uint16_t getFlags() const {
return flags_.load(std::memory_order_consume);
}
void setFlags(uint16_t flags) {
flags_.store(flags, std::memory_order_release);
}
// TODO(xliu): on x86_64, it's possible to squeeze these into
// skip_[0] to maybe save 8 bytes depending on the data alignments.
// NOTE: currently this is x86_64 only anyway, due to the
// MicroSpinLock.
std::atomic<uint16_t> flags_;
const uint8_t height_;
MicroSpinLock spinLock_;
value_type data_;
std::atomic<SkipListNode*> skip_[0];
};
class SkipListRandomHeight {
enum { kMaxHeight = 64 };
public:
// make it a singleton.
static SkipListRandomHeight *instance() {
static SkipListRandomHeight instance_;
return &instance_;
}
int getHeight(int maxHeight) const {
DCHECK_LE(maxHeight, kMaxHeight) << "max height too big!";
double p = randomProb();
for (int i = 0; i < maxHeight; ++i) {
if (p < lookupTable_[i]) {
return i + 1;
}
}
return maxHeight;
}
size_t getSizeLimit(int height) const {
DCHECK_LT(height, kMaxHeight);
return sizeLimitTable_[height];
}
private:
SkipListRandomHeight() { initLookupTable(); }
void initLookupTable() {
// set skip prob = 1/E
static const double kProbInv = exp(1);
static const double kProb = 1.0 / kProbInv;
static const size_t kMaxSizeLimit = std::numeric_limits<size_t>::max();
double sizeLimit = 1;
double p = lookupTable_[0] = (1 - kProb);
sizeLimitTable_[0] = 1;
for (int i = 1; i < kMaxHeight - 1; ++i) {
p *= kProb;
sizeLimit *= kProbInv;
lookupTable_[i] = lookupTable_[i - 1] + p;
sizeLimitTable_[i] = sizeLimit > kMaxSizeLimit ?
kMaxSizeLimit :
static_cast<size_t>(sizeLimit);
}
lookupTable_[kMaxHeight - 1] = 1;
sizeLimitTable_[kMaxHeight - 1] = kMaxSizeLimit;
}
static double randomProb() {
static ThreadLocal<boost::lagged_fibonacci2281> rng_;
return (*rng_)();
}
double lookupTable_[kMaxHeight];
size_t sizeLimitTable_[kMaxHeight];
};
template<typename NodeType, typename NodeAlloc, typename = void>
class NodeRecycler;
template<typename NodeType, typename NodeAlloc>
class NodeRecycler<NodeType, NodeAlloc, typename std::enable_if<
!NodeType::template destroyIsNoOp<NodeAlloc>()>::type> {
public:
explicit NodeRecycler(const NodeAlloc& alloc)
: refs_(0), dirty_(false), alloc_(alloc) { lock_.init(); }
explicit NodeRecycler() : refs_(0), dirty_(false) { lock_.init(); }
~NodeRecycler() {
CHECK_EQ(refs(), 0);
if (nodes_) {
for (auto& node : *nodes_) {
NodeType::destroy(alloc_, node);
}
}
}
void add(NodeType* node) {
std::lock_guard<MicroSpinLock> g(lock_);
if (nodes_.get() == nullptr) {
nodes_.reset(new std::vector<NodeType*>(1, node));
} else {
nodes_->push_back(node);
}
DCHECK_GT(refs(), 0);
dirty_.store(true, std::memory_order_relaxed);
}
int addRef() {
return refs_.fetch_add(1, std::memory_order_relaxed);
}
int releaseRef() {
// We don't expect to clean the recycler immediately every time it is OK
// to do so. Here, it is possible that multiple accessors all release at
// the same time but nobody would clean the recycler here. If this
// happens, the recycler will usually still get cleaned when
// such a race doesn't happen. The worst case is the recycler will
// eventually get deleted along with the skiplist.
if (LIKELY(!dirty_.load(std::memory_order_relaxed) || refs() > 1)) {
return refs_.fetch_add(-1, std::memory_order_relaxed);
}
std::unique_ptr<std::vector<NodeType*>> newNodes;
{
std::lock_guard<MicroSpinLock> g(lock_);
if (nodes_.get() == nullptr || refs() > 1) {
return refs_.fetch_add(-1, std::memory_order_relaxed);
}
// once refs_ reaches 1 and there is no other accessor, it is safe to
// remove all the current nodes in the recycler, as we already acquired
// the lock here so no more new nodes can be added, even though new
// accessors may be added after that.
newNodes.swap(nodes_);
dirty_.store(false, std::memory_order_relaxed);
}
// TODO(xliu) should we spawn a thread to do this when there is a large
// number of nodes in the recycler?
for (auto& node : *newNodes) {
NodeType::destroy(alloc_, node);
}
// decrease the ref count at the very end, to minimize the
// chance of other threads acquiring lock_ to clear the deleted
// nodes again.
return refs_.fetch_add(-1, std::memory_order_relaxed);
}
NodeAlloc& alloc() { return alloc_; }
private:
int refs() const {
return refs_.load(std::memory_order_relaxed);
}
std::unique_ptr<std::vector<NodeType*>> nodes_;
std::atomic<int32_t> refs_; // current number of visitors to the list
std::atomic<bool> dirty_; // whether *nodes_ is non-empty
MicroSpinLock lock_; // protects access to *nodes_
NodeAlloc alloc_;
};
// In case of arena allocator, no recycling is necessary, and it's possible
// to save on ConcurrentSkipList size.
template<typename NodeType, typename NodeAlloc>
class NodeRecycler<NodeType, NodeAlloc, typename std::enable_if<
NodeType::template destroyIsNoOp<NodeAlloc>()>::type> {
public:
explicit NodeRecycler(const NodeAlloc& alloc) : alloc_(alloc) { }
void addRef() { }
void releaseRef() { }
void add(NodeType* /* node */) {}
NodeAlloc& alloc() { return alloc_; }
private:
NodeAlloc alloc_;
};
}} // namespaces
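As a side note, the lookup table built in SkipListRandomHeight::initLookupTable() above encodes a geometric height distribution in which each extra level is kept with probability 1/e. A small standalone sketch of that sampling idea (illustrative only, not the Folly implementation):

#include <cmath>
#include <cstdio>
#include <random>

// Draw a skip-list node height: keep adding levels with probability 1/e,
// capped at maxHeight. Equivalent in distribution to the lookup-table
// approach used by SkipListRandomHeight above.
int sampleHeight(int maxHeight, std::mt19937& rng) {
  const double kProb = 1.0 / std::exp(1.0);
  std::uniform_real_distribution<double> uni(0.0, 1.0);
  int h = 1;
  while (h < maxHeight && uni(rng) < kProb) {
    ++h;
  }
  return h;
}

int main() {
  std::mt19937 rng(42);
  int counts[8] = {0};
  for (int i = 0; i < 100000; ++i) {
    ++counts[sampleHeight(8, rng) - 1];
  }
  for (int h = 0; h < 8; ++h) {
    std::printf("height %d: %d\n", h + 1, counts[h]);
  }
  return 0;
}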

801
ios/Pods/Folly/folly/ConcurrentSkipList.h generated Normal file
View File

@ -0,0 +1,801 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// @author: Xin Liu <xliux@fb.com>
//
// A concurrent skip list (CSL) implementation.
// Ref: http://www.cs.tau.ac.il/~shanir/nir-pubs-web/Papers/OPODIS2006-BA.pdf
/*
This implements a sorted associative container that supports only
unique keys. (Similar to std::set.)
Features:
1. Small memory overhead: ~40% less memory overhead compared with
std::set (1.6 words per node versus 3). It has a minimum of 4
words (7 words if nodes got deleted) per-list overhead,
though.
2. Read accesses (count, find iterator, skipper) are lock-free and
mostly wait-free (the only wait a reader may need to do is when
the node it is visiting is in a pending stage, i.e. deleting,
adding and not fully linked). Write accesses (remove, add) need
to acquire locks, but locks are local to the predecessor nodes
and/or successor nodes.
3. Good high contention performance, comparable single-thread
performance. In the multithreaded case (12 workers), CSL tested
10x faster than a RWSpinLocked std::set for an averaged sized
list (1K - 1M nodes).
Comparable read performance to std::set when single threaded,
especially when the list size is large, and scales better to
larger lists: when the size is small, CSL can be 20-50% slower on
find()/contains(). As the size gets large (> 1M elements),
find()/contains() can be 30% faster.
Iterating through a skiplist is similar to iterating through a
linked list, thus is much (2-6x) faster than on a std::set
(tree-based). This is especially true for short lists due to
better cache locality. Based on that, it's also faster to
intersect two skiplists.
4. Lazy removal with GC support. The removed nodes get deleted when
the last Accessor to the skiplist is destroyed.
Caveats:
1. Write operations are usually 30% slower than std::set in a single
threaded environment.
2. Need to have a head node for each list, which has a 4 word
overhead.
3. When the list is quite small (< 1000 elements), single threaded
benchmarks show CSL can be 10x slower than std::set.
4. The interface requires using an Accessor to access the skiplist.
(See below.)
5. Currently x64 only, due to use of MicroSpinLock.
6. Freed nodes will not be reclaimed as long as there are ongoing
uses of the list.
Sample usage:
typedef ConcurrentSkipList<int> SkipListT;
shared_ptr<SkipListT> sl(SkipListT::createInstance(init_head_height));
{
// It's usually good practice to hold an accessor only during
// its necessary life cycle (but not in a tight loop as
// Accessor creation incurs ref-counting overhead).
//
// Holding it longer delays garbage-collecting the deleted
// nodes in the list.
SkipListT::Accessor accessor(sl);
accessor.insert(23);
accessor.erase(2);
for (auto &elem : accessor) {
// use elem to access data
}
... ...
}
Another useful type is the Skipper accessor. This is useful if you
want to skip to locations in the way std::lower_bound() works,
i.e. it can be used for going through the list by skipping to the
node no less than a specified key. The Skipper keeps its location as
state, which makes it convenient for things like implementing
intersection of two sets efficiently, as it can start from the last
visited position.
{
SkipListT::Accessor accessor(sl);
SkipListT::Skipper skipper(accessor);
skipper.to(30);
if (skipper) {
CHECK_LE(30, *skipper);
}
... ...
// GC may happen when the accessor gets destructed.
}
*/
#pragma once
#include <algorithm>
#include <atomic>
#include <limits>
#include <memory>
#include <type_traits>
#include <boost/iterator/iterator_facade.hpp>
#include <glog/logging.h>
#include <folly/ConcurrentSkipList-inl.h>
#include <folly/Likely.h>
#include <folly/Memory.h>
#include <folly/MicroSpinLock.h>
namespace folly {
template<typename T,
typename Comp = std::less<T>,
// All nodes are allocated using provided SimpleAllocator,
// it should be thread-safe.
typename NodeAlloc = SysAlloc,
int MAX_HEIGHT = 24>
class ConcurrentSkipList {
// MAX_HEIGHT needs to be at least 2 to suppress compiler
// warnings/errors (Werror=uninitialized triggered due to preds_[1]
// being treated as a scalar in the compiler).
static_assert(MAX_HEIGHT >= 2 && MAX_HEIGHT < 64,
"MAX_HEIGHT can only be in the range of [2, 64)");
typedef std::unique_lock<folly::MicroSpinLock> ScopedLocker;
typedef ConcurrentSkipList<T, Comp, NodeAlloc, MAX_HEIGHT> SkipListType;
public:
typedef detail::SkipListNode<T> NodeType;
typedef T value_type;
typedef T key_type;
typedef detail::csl_iterator<value_type, NodeType> iterator;
typedef detail::csl_iterator<const value_type, const NodeType> const_iterator;
class Accessor;
class Skipper;
explicit ConcurrentSkipList(int height, const NodeAlloc& alloc)
: recycler_(alloc),
head_(NodeType::create(recycler_.alloc(), height, value_type(), true)),
size_(0) {}
explicit ConcurrentSkipList(int height)
: recycler_(),
head_(NodeType::create(recycler_.alloc(), height, value_type(), true)),
size_(0) {}
// Convenient function to get an Accessor to a new instance.
static Accessor create(int height, const NodeAlloc& alloc) {
return Accessor(createInstance(height, alloc));
}
static Accessor create(int height = 1) {
return Accessor(createInstance(height));
}
// Create a shared_ptr skiplist object with initial head height.
static std::shared_ptr<SkipListType> createInstance(int height,
const NodeAlloc& alloc) {
return std::make_shared<ConcurrentSkipList>(height, alloc);
}
static std::shared_ptr<SkipListType> createInstance(int height = 1) {
return std::make_shared<ConcurrentSkipList>(height);
}
//===================================================================
// Below are implementation details.
// Please see ConcurrentSkipList::Accessor for stdlib-like APIs.
//===================================================================
~ConcurrentSkipList() {
/* static */ if (NodeType::template destroyIsNoOp<NodeAlloc>()) {
// Avoid traversing the list if using arena allocator.
return;
}
for (NodeType* current = head_.load(std::memory_order_relaxed); current; ) {
NodeType* tmp = current->skip(0);
NodeType::destroy(recycler_.alloc(), current);
current = tmp;
}
}
private:
static bool greater(const value_type &data, const NodeType *node) {
return node && Comp()(node->data(), data);
}
static bool less(const value_type &data, const NodeType *node) {
return (node == nullptr) || Comp()(data, node->data());
}
static int findInsertionPoint(NodeType *cur, int cur_layer,
const value_type &data,
NodeType *preds[], NodeType *succs[]) {
int foundLayer = -1;
NodeType *pred = cur;
NodeType *foundNode = nullptr;
for (int layer = cur_layer; layer >= 0; --layer) {
NodeType *node = pred->skip(layer);
while (greater(data, node)) {
pred = node;
node = node->skip(layer);
}
if (foundLayer == -1 && !less(data, node)) { // the two keys equal
foundLayer = layer;
foundNode = node;
}
preds[layer] = pred;
// if found, succs[0..foundLayer] need to point to the cached foundNode,
// as foundNode might be deleted at the same time thus pred->skip() can
// return NULL or another node.
succs[layer] = foundNode ? foundNode : node;
}
return foundLayer;
}
size_t size() const { return size_.load(std::memory_order_relaxed); }
int height() const {
return head_.load(std::memory_order_consume)->height();
}
int maxLayer() const { return height() - 1; }
size_t incrementSize(int delta) {
return size_.fetch_add(delta, std::memory_order_relaxed) + delta;
}
// Returns the node if found, nullptr otherwise.
NodeType* find(const value_type &data) {
auto ret = findNode(data);
if (ret.second && !ret.first->markedForRemoval()) return ret.first;
return nullptr;
}
// lock all the necessary nodes for changing (adding or removing) the list.
// returns true if all the locks are acquired successfully and the related
// nodes are all valid (not in certain pending states), false otherwise.
bool lockNodesForChange(int nodeHeight,
ScopedLocker guards[MAX_HEIGHT],
NodeType *preds[MAX_HEIGHT],
NodeType *succs[MAX_HEIGHT],
bool adding=true) {
NodeType *pred, *succ, *prevPred = nullptr;
bool valid = true;
for (int layer = 0; valid && layer < nodeHeight; ++layer) {
pred = preds[layer];
DCHECK(pred != nullptr) << "layer=" << layer << " height=" << height()
<< " nodeheight=" << nodeHeight;
succ = succs[layer];
if (pred != prevPred) {
guards[layer] = pred->acquireGuard();
prevPred = pred;
}
valid = !pred->markedForRemoval() &&
pred->skip(layer) == succ; // check again after locking
if (adding) { // when adding a node, the succ shouldn't be going away
valid = valid && (succ == nullptr || !succ->markedForRemoval());
}
}
return valid;
}
// Returns a paired value:
// pair.first always stores the pointer to the node with the same input key.
// It could be either the newly added data, or the existed data in the
// list with the same key.
// pair.second stores whether the data is added successfully:
// 0 means not added, otherwise returns the new size.
template<typename U>
std::pair<NodeType*, size_t> addOrGetData(U &&data) {
NodeType *preds[MAX_HEIGHT], *succs[MAX_HEIGHT];
NodeType *newNode;
size_t newSize;
while (true) {
int max_layer = 0;
int layer = findInsertionPointGetMaxLayer(data, preds, succs, &max_layer);
if (layer >= 0) {
NodeType *nodeFound = succs[layer];
DCHECK(nodeFound != nullptr);
if (nodeFound->markedForRemoval()) {
continue; // if it's getting deleted retry finding node.
}
// wait until fully linked.
while (UNLIKELY(!nodeFound->fullyLinked())) {}
return std::make_pair(nodeFound, 0);
}
// needs to be capped at the original height -- the real height may have grown
int nodeHeight = detail::SkipListRandomHeight::instance()->
getHeight(max_layer + 1);
ScopedLocker guards[MAX_HEIGHT];
if (!lockNodesForChange(nodeHeight, guards, preds, succs)) {
continue; // give up the locks and retry until all valid
}
// locks acquired and all valid, need to modify the links under the locks.
newNode =
NodeType::create(recycler_.alloc(), nodeHeight, std::forward<U>(data));
for (int k = 0; k < nodeHeight; ++k) {
newNode->setSkip(k, succs[k]);
preds[k]->setSkip(k, newNode);
}
newNode->setFullyLinked();
newSize = incrementSize(1);
break;
}
int hgt = height();
size_t sizeLimit =
detail::SkipListRandomHeight::instance()->getSizeLimit(hgt);
if (hgt < MAX_HEIGHT && newSize > sizeLimit) {
growHeight(hgt + 1);
}
CHECK_GT(newSize, 0);
return std::make_pair(newNode, newSize);
}
bool remove(const value_type &data) {
NodeType *nodeToDelete = nullptr;
ScopedLocker nodeGuard;
bool isMarked = false;
int nodeHeight = 0;
NodeType* preds[MAX_HEIGHT], *succs[MAX_HEIGHT];
while (true) {
int max_layer = 0;
int layer = findInsertionPointGetMaxLayer(data, preds, succs, &max_layer);
if (!isMarked && (layer < 0 || !okToDelete(succs[layer], layer))) {
return false;
}
if (!isMarked) {
nodeToDelete = succs[layer];
nodeHeight = nodeToDelete->height();
nodeGuard = nodeToDelete->acquireGuard();
if (nodeToDelete->markedForRemoval()) return false;
nodeToDelete->setMarkedForRemoval();
isMarked = true;
}
// acquire pred locks from bottom layer up
ScopedLocker guards[MAX_HEIGHT];
if (!lockNodesForChange(nodeHeight, guards, preds, succs, false)) {
continue; // this will unlock all the locks
}
for (int k = nodeHeight - 1; k >= 0; --k) {
preds[k]->setSkip(k, nodeToDelete->skip(k));
}
incrementSize(-1);
break;
}
recycle(nodeToDelete);
return true;
}
const value_type *first() const {
auto node = head_.load(std::memory_order_consume)->skip(0);
return node ? &node->data() : nullptr;
}
const value_type *last() const {
NodeType *pred = head_.load(std::memory_order_consume);
NodeType *node = nullptr;
for (int layer = maxLayer(); layer >= 0; --layer) {
do {
node = pred->skip(layer);
if (node) pred = node;
} while (node != nullptr);
}
return pred == head_.load(std::memory_order_relaxed)
? nullptr : &pred->data();
}
static bool okToDelete(NodeType *candidate, int layer) {
DCHECK(candidate != nullptr);
return candidate->fullyLinked() &&
candidate->maxLayer() == layer &&
!candidate->markedForRemoval();
}
// find node for insertion/deleting
int findInsertionPointGetMaxLayer(const value_type &data,
NodeType *preds[], NodeType *succs[], int *max_layer) const {
*max_layer = maxLayer();
return findInsertionPoint(head_.load(std::memory_order_consume),
*max_layer, data, preds, succs);
}
// Find node for access. Returns a paired values:
// pair.first = the first node that no-less than data value
// pair.second = 1 when the data value is found, or 0 otherwise.
// This is like lower_bound, but not exact: we could have the node marked for
// removal so still need to check that.
std::pair<NodeType*, int> findNode(const value_type &data) const {
return findNodeDownRight(data);
}
// Find node by first stepping down then stepping right. Based on benchmark
// results, this is slightly faster than findNodeRightDown for better
// locality on the skipping pointers.
std::pair<NodeType*, int> findNodeDownRight(const value_type &data) const {
NodeType *pred = head_.load(std::memory_order_consume);
int ht = pred->height();
NodeType *node = nullptr;
bool found = false;
while (!found) {
// stepping down
for (; ht > 0 && less(data, node = pred->skip(ht - 1)); --ht) {}
if (ht == 0) return std::make_pair(node, 0); // not found
// node <= data now, but we need to fix up ht
--ht;
// stepping right
while (greater(data, node)) {
pred = node;
node = node->skip(ht);
}
found = !less(data, node);
}
return std::make_pair(node, found);
}
// find node by first stepping right then stepping down.
// We still keep this for reference purposes.
std::pair<NodeType*, int> findNodeRightDown(const value_type &data) const {
NodeType *pred = head_.load(std::memory_order_consume);
NodeType *node = nullptr;
auto top = maxLayer();
int found = 0;
for (int layer = top; !found && layer >= 0; --layer) {
node = pred->skip(layer);
while (greater(data, node)) {
pred = node;
node = node->skip(layer);
}
found = !less(data, node);
}
return std::make_pair(node, found);
}
NodeType* lower_bound(const value_type &data) const {
auto node = findNode(data).first;
while (node != nullptr && node->markedForRemoval()) {
node = node->skip(0);
}
return node;
}
void growHeight(int height) {
NodeType* oldHead = head_.load(std::memory_order_consume);
if (oldHead->height() >= height) { // someone else already did this
return;
}
NodeType* newHead =
NodeType::create(recycler_.alloc(), height, value_type(), true);
{ // need to guard the head node in case others are adding/removing
// nodes linked to the head.
ScopedLocker g = oldHead->acquireGuard();
newHead->copyHead(oldHead);
NodeType* expected = oldHead;
if (!head_.compare_exchange_strong(expected, newHead,
std::memory_order_release)) {
// if someone has already done the swap, just return.
NodeType::destroy(recycler_.alloc(), newHead);
return;
}
oldHead->setMarkedForRemoval();
}
recycle(oldHead);
}
void recycle(NodeType *node) {
recycler_.add(node);
}
detail::NodeRecycler<NodeType, NodeAlloc> recycler_;
std::atomic<NodeType*> head_;
std::atomic<size_t> size_;
};
template<typename T, typename Comp, typename NodeAlloc, int MAX_HEIGHT>
class ConcurrentSkipList<T, Comp, NodeAlloc, MAX_HEIGHT>::Accessor {
typedef detail::SkipListNode<T> NodeType;
typedef ConcurrentSkipList<T, Comp, NodeAlloc, MAX_HEIGHT> SkipListType;
public:
typedef T value_type;
typedef T key_type;
typedef T& reference;
typedef T* pointer;
typedef const T& const_reference;
typedef const T* const_pointer;
typedef size_t size_type;
typedef Comp key_compare;
typedef Comp value_compare;
typedef typename SkipListType::iterator iterator;
typedef typename SkipListType::const_iterator const_iterator;
typedef typename SkipListType::Skipper Skipper;
explicit Accessor(std::shared_ptr<ConcurrentSkipList> skip_list)
: slHolder_(std::move(skip_list))
{
sl_ = slHolder_.get();
DCHECK(sl_ != nullptr);
sl_->recycler_.addRef();
}
// Unsafe initializer: the caller assumes the responsibility to keep
// skip_list valid during the whole life cycle of the Accessor.
explicit Accessor(ConcurrentSkipList *skip_list) : sl_(skip_list) {
DCHECK(sl_ != nullptr);
sl_->recycler_.addRef();
}
Accessor(const Accessor &accessor) :
sl_(accessor.sl_),
slHolder_(accessor.slHolder_) {
sl_->recycler_.addRef();
}
Accessor& operator=(const Accessor &accessor) {
if (this != &accessor) {
slHolder_ = accessor.slHolder_;
sl_->recycler_.releaseRef();
sl_ = accessor.sl_;
sl_->recycler_.addRef();
}
return *this;
}
~Accessor() {
sl_->recycler_.releaseRef();
}
bool empty() const { return sl_->size() == 0; }
size_t size() const { return sl_->size(); }
size_type max_size() const { return std::numeric_limits<size_type>::max(); }
// returns end() if the value is not in the list, otherwise returns an
// iterator pointing to the data, and it's guaranteed that the data is valid
// as long as the Accessor is held.
iterator find(const key_type &value) { return iterator(sl_->find(value)); }
const_iterator find(const key_type &value) const {
return iterator(sl_->find(value));
}
size_type count(const key_type &data) const { return contains(data); }
iterator begin() const {
NodeType* head = sl_->head_.load(std::memory_order_consume);
return iterator(head->next());
}
iterator end() const { return iterator(nullptr); }
const_iterator cbegin() const { return begin(); }
const_iterator cend() const { return end(); }
template<typename U,
typename=typename std::enable_if<std::is_convertible<U, T>::value>::type>
std::pair<iterator, bool> insert(U&& data) {
auto ret = sl_->addOrGetData(std::forward<U>(data));
return std::make_pair(iterator(ret.first), ret.second);
}
size_t erase(const key_type &data) { return remove(data); }
iterator lower_bound(const key_type &data) const {
return iterator(sl_->lower_bound(data));
}
size_t height() const { return sl_->height(); }
// first() returns pointer to the first element in the skiplist, or
// nullptr if empty.
//
// last() returns the pointer to the last element in the skiplist,
// nullptr if list is empty.
//
// Note: As concurrent writing can happen, first() is not
// guaranteed to be the min_element() in the list. Similarly
// last() is not guaranteed to be the max_element(), and both of them can
// be invalid (i.e. nullptr), so we name them differently from front() and
// tail() here.
const key_type *first() const { return sl_->first(); }
const key_type *last() const { return sl_->last(); }
// Try to remove the last element in the skip list.
//
// Returns true if we removed it, false if either the list is empty
// or a race condition happened (i.e. the used-to-be last element
// was already removed by another thread).
bool pop_back() {
auto last = sl_->last();
return last ? sl_->remove(*last) : false;
}
std::pair<key_type*, bool> addOrGetData(const key_type &data) {
auto ret = sl_->addOrGetData(data);
return std::make_pair(&ret.first->data(), ret.second);
}
SkipListType* skiplist() const { return sl_; }
// legacy interfaces
// TODO:(xliu) remove these.
// Returns true if the node is added successfully, false if not, i.e. the
// node with the same key already existed in the list.
bool contains(const key_type &data) const { return sl_->find(data); }
bool add(const key_type &data) { return sl_->addOrGetData(data).second; }
bool remove(const key_type &data) { return sl_->remove(data); }
private:
SkipListType *sl_;
std::shared_ptr<SkipListType> slHolder_;
};
// implements forward iterator concept.
template<typename ValT, typename NodeT>
class detail::csl_iterator :
public boost::iterator_facade<csl_iterator<ValT, NodeT>,
ValT, boost::forward_traversal_tag> {
public:
typedef ValT value_type;
typedef value_type& reference;
typedef value_type* pointer;
typedef ptrdiff_t difference_type;
explicit csl_iterator(NodeT* node = nullptr) : node_(node) {}
template<typename OtherVal, typename OtherNode>
csl_iterator(const csl_iterator<OtherVal, OtherNode> &other,
typename std::enable_if<std::is_convertible<OtherVal, ValT>::value>::type*
= 0) : node_(other.node_) {}
size_t nodeSize() const {
return node_ == nullptr ? 0 :
node_->height() * sizeof(NodeT*) + sizeof(*this);
}
bool good() const { return node_ != nullptr; }
private:
friend class boost::iterator_core_access;
template<class,class> friend class csl_iterator;
void increment() { node_ = node_->next(); };
bool equal(const csl_iterator& other) const { return node_ == other.node_; }
value_type& dereference() const { return node_->data(); }
NodeT* node_;
};
// Skipper interface
template<typename T, typename Comp, typename NodeAlloc, int MAX_HEIGHT>
class ConcurrentSkipList<T, Comp, NodeAlloc, MAX_HEIGHT>::Skipper {
typedef detail::SkipListNode<T> NodeType;
typedef ConcurrentSkipList<T, Comp, NodeAlloc, MAX_HEIGHT> SkipListType;
typedef typename SkipListType::Accessor Accessor;
public:
typedef T value_type;
typedef T& reference;
typedef T* pointer;
typedef ptrdiff_t difference_type;
Skipper(const std::shared_ptr<SkipListType>& skipList) :
accessor_(skipList) {
init();
}
Skipper(const Accessor& accessor) : accessor_(accessor) {
init();
}
void init() {
// need to cache the head node
NodeType* head_node = head();
headHeight_ = head_node->height();
for (int i = 0; i < headHeight_; ++i) {
preds_[i] = head_node;
succs_[i] = head_node->skip(i);
}
int max_layer = maxLayer();
for (int i = 0; i < max_layer; ++i) {
hints_[i] = i + 1;
}
hints_[max_layer] = max_layer;
}
// advance to the next node in the list.
Skipper& operator ++() {
preds_[0] = succs_[0];
succs_[0] = preds_[0]->skip(0);
int height = curHeight();
for (int i = 1; i < height && preds_[0] == succs_[i]; ++i) {
preds_[i] = succs_[i];
succs_[i] = preds_[i]->skip(i);
}
return *this;
}
bool good() const { return succs_[0] != nullptr; }
int maxLayer() const { return headHeight_ - 1; }
int curHeight() const {
// need to cap the height to the cached head height, as the current node
// might be a newly inserted node, and the head height may also have grown
// in the meantime.
return succs_[0] ? std::min(headHeight_, succs_[0]->height()) : 0;
}
const value_type &data() const {
DCHECK(succs_[0] != nullptr);
return succs_[0]->data();
}
value_type &operator *() const {
DCHECK(succs_[0] != nullptr);
return succs_[0]->data();
}
value_type *operator->() {
DCHECK(succs_[0] != nullptr);
return &succs_[0]->data();
}
/*
* Skip to the position whose data is no less than the parameter.
* (I.e. the lower_bound).
*
* Returns true if the data is found, false otherwise.
*/
bool to(const value_type &data) {
int layer = curHeight() - 1;
if (layer < 0) return false; // reaches the end of the list
int lyr = hints_[layer];
int max_layer = maxLayer();
while (SkipListType::greater(data, succs_[lyr]) && lyr < max_layer) {
++lyr;
}
hints_[layer] = lyr; // update the hint
int foundLayer = SkipListType::
findInsertionPoint(preds_[lyr], lyr, data, preds_, succs_);
if (foundLayer < 0) return false;
DCHECK(succs_[0] != nullptr) << "lyr=" << lyr
<< "; max_layer=" << max_layer;
return !succs_[0]->markedForRemoval();
}
private:
NodeType* head() const {
return accessor_.skiplist()->head_.load(std::memory_order_consume);
}
Accessor accessor_;
int headHeight_;
NodeType *succs_[MAX_HEIGHT], *preds_[MAX_HEIGHT];
uint8_t hints_[MAX_HEIGHT];
};
} // namespace folly
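A compilable sketch of the Accessor/Skipper usage described in this file's header comment (assuming the Pod's Folly headers and library are available to the build):

#include <folly/ConcurrentSkipList.h>

#include <cstdio>

int main() {
  using SkipListT = folly::ConcurrentSkipList<int>;
  auto sl = SkipListT::createInstance(/* height */ 4);
  {
    // The Accessor pins the list's garbage collection while it is alive.
    SkipListT::Accessor accessor(sl);
    accessor.insert(23);
    accessor.insert(7);
    accessor.erase(2);  // not present; returns 0
    for (int v : accessor) {
      std::printf("%d\n", v);  // prints 7 then 23 (sorted order)
    }
    // The Skipper keeps its position, useful for lower_bound-style scans.
    SkipListT::Skipper skipper(accessor);
    skipper.to(10);  // positions at the first element >= 10; true only on exact match
    if (skipper.good()) {
      std::printf("first element >= 10: %d\n", *skipper);  // 23
    }
  }
  return 0;
}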

42
ios/Pods/Folly/folly/ContainerTraits.h generated Normal file
View File

@ -0,0 +1,42 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/Traits.h>
namespace folly {
FOLLY_CREATE_HAS_MEMBER_FN_TRAITS(container_emplace_back_traits, emplace_back);
template <class Container, typename... Args>
inline
typename std::enable_if<
container_emplace_back_traits<Container, void(Args...)>::value>::type
container_emplace_back_or_push_back(Container& container, Args&&... args) {
container.emplace_back(std::forward<Args>(args)...);
}
template <class Container, typename... Args>
inline
typename std::enable_if<
!container_emplace_back_traits<Container, void(Args...)>::value>::type
container_emplace_back_or_push_back(Container& container, Args&&... args) {
using v = typename Container::value_type;
container.push_back(v(std::forward<Args>(args)...));
}
}
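A short sketch of the dispatch helper above: it calls emplace_back() when the container exposes one with a matching signature, and falls back to push_back() otherwise (header path as added by this commit):

#include <folly/ContainerTraits.h>

#include <cstdio>
#include <utility>
#include <vector>

int main() {
  std::vector<std::pair<int, int>> v;
  // Constructs the pair in place when emplace_back is detected,
  // otherwise builds a temporary value and push_back()s it.
  folly::container_emplace_back_or_push_back(v, 1, 2);
  std::printf("(%d, %d)\n", v.back().first, v.back().second);
  return 0;
}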

764
ios/Pods/Folly/folly/Conv.cpp generated Normal file
View File

@ -0,0 +1,764 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <folly/Conv.h>
#include <array>
namespace folly {
namespace detail {
namespace {
/**
* Finds the first non-digit in a string. The number of digits
* searched depends on the precision of the Tgt integral. Assumes the
* string starts with NO whitespace and NO sign.
*
* The semantics of the routine is:
* for (;; ++b) {
* if (b >= e || !isdigit(*b)) return b;
* }
*
* Complete unrolling marks bottom-line (i.e. entire conversion)
* improvements of 20%.
*/
inline const char* findFirstNonDigit(const char* b, const char* e) {
for (; b < e; ++b) {
auto const c = static_cast<unsigned>(*b) - '0';
if (c >= 10) {
break;
}
}
return b;
}
// Maximum value of number when represented as a string
template <class T>
struct MaxString {
static const char* const value;
};
template <> const char *const MaxString<uint8_t>::value = "255";
template <> const char *const MaxString<uint16_t>::value = "65535";
template <> const char *const MaxString<uint32_t>::value = "4294967295";
#if __SIZEOF_LONG__ == 4
template <> const char *const MaxString<unsigned long>::value =
"4294967295";
#else
template <> const char *const MaxString<unsigned long>::value =
"18446744073709551615";
#endif
static_assert(sizeof(unsigned long) >= 4,
"Wrong value for MaxString<unsigned long>::value,"
" please update.");
template <> const char *const MaxString<unsigned long long>::value =
"18446744073709551615";
static_assert(sizeof(unsigned long long) >= 8,
"Wrong value for MaxString<unsigned long long>::value"
", please update.");
#if FOLLY_HAVE_INT128_T
template <> const char *const MaxString<__uint128_t>::value =
"340282366920938463463374607431768211455";
#endif
/*
* Lookup tables that converts from a decimal character value to an integral
* binary value, shifted by a decimal "shift" multiplier.
* For all character values in the range '0'..'9', the table at those
* index locations returns the actual decimal value shifted by the multiplier.
* For all other values, the lookup table returns an invalid OOR value.
*/
// Out-of-range flag value, larger than the largest value that can fit in
// four decimal bytes (9999), but four of these added up together should
// still not overflow uint16_t.
constexpr int32_t OOR = 10000;
FOLLY_ALIGNED(16) constexpr uint16_t shift1[] = {
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 0-9
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 10
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 20
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 30
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, 0, // 40
1, 2, 3, 4, 5, 6, 7, 8, 9, OOR, OOR,
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 60
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 70
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 80
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 90
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 100
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 110
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 120
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 130
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 140
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 150
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 160
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 170
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 180
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 190
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 200
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 210
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 220
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 230
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 240
OOR, OOR, OOR, OOR, OOR, OOR // 250
};
FOLLY_ALIGNED(16) constexpr uint16_t shift10[] = {
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 0-9
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 10
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 20
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 30
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, 0, // 40
10, 20, 30, 40, 50, 60, 70, 80, 90, OOR, OOR,
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 60
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 70
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 80
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 90
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 100
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 110
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 120
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 130
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 140
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 150
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 160
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 170
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 180
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 190
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 200
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 210
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 220
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 230
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 240
OOR, OOR, OOR, OOR, OOR, OOR // 250
};
FOLLY_ALIGNED(16) constexpr uint16_t shift100[] = {
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 0-9
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 10
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 20
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 30
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, 0, // 40
100, 200, 300, 400, 500, 600, 700, 800, 900, OOR, OOR,
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 60
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 70
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 80
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 90
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 100
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 110
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 120
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 130
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 140
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 150
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 160
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 170
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 180
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 190
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 200
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 210
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 220
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 230
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 240
OOR, OOR, OOR, OOR, OOR, OOR // 250
};
FOLLY_ALIGNED(16) constexpr uint16_t shift1000[] = {
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 0-9
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 10
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 20
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 30
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, 0, // 40
1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, OOR, OOR,
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 60
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 70
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 80
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 90
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 100
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 110
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 120
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 130
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 140
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 150
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 160
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 170
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 180
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 190
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 200
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 210
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 220
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 230
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 240
OOR, OOR, OOR, OOR, OOR, OOR // 250
};
struct ErrorString {
const char* string;
bool quote;
};
// Keep this in sync with ConversionCode in Conv.h
constexpr const std::array<
ErrorString,
static_cast<std::size_t>(ConversionCode::NUM_ERROR_CODES)>
kErrorStrings{{
{"Success", true},
{"Empty input string", true},
{"No digits found in input string", true},
{"Integer overflow when parsing bool (must be 0 or 1)", true},
{"Invalid value for bool", true},
{"Non-digit character found", true},
{"Invalid leading character", true},
{"Overflow during conversion", true},
{"Negative overflow during conversion", true},
{"Unable to convert string to floating point value", true},
{"Non-whitespace character found after end of conversion", true},
{"Overflow during arithmetic conversion", false},
{"Negative overflow during arithmetic conversion", false},
{"Loss of precision during arithmetic conversion", false},
}};
// Check if ASCII is really ASCII
using IsAscii = std::
integral_constant<bool, 'A' == 65 && 'Z' == 90 && 'a' == 97 && 'z' == 122>;
// The code in this file that uses tolower() really only cares about
// 7-bit ASCII characters, so we can take a nice shortcut here.
inline char tolower_ascii(char in) {
return IsAscii::value ? in | 0x20 : std::tolower(in);
}
inline bool bool_str_cmp(const char** b, size_t len, const char* value) {
// Can't use strncasecmp, since we want to ensure that the full value matches
const char* p = *b;
const char* e = *b + len;
const char* v = value;
while (*v != '\0') {
if (p == e || tolower_ascii(*p) != *v) { // value is already lowercase
return false;
}
++p;
++v;
}
*b = p;
return true;
}
} // anonymous namespace
Expected<bool, ConversionCode> str_to_bool(StringPiece* src) noexcept {
auto b = src->begin(), e = src->end();
for (;; ++b) {
if (b >= e) {
return makeUnexpected(ConversionCode::EMPTY_INPUT_STRING);
}
if (!std::isspace(*b)) {
break;
}
}
bool result;
size_t len = e - b;
switch (*b) {
case '0':
case '1': {
result = false;
for (; b < e && isdigit(*b); ++b) {
if (result || (*b != '0' && *b != '1')) {
return makeUnexpected(ConversionCode::BOOL_OVERFLOW);
}
result = (*b == '1');
}
break;
}
case 'y':
case 'Y':
result = true;
if (!bool_str_cmp(&b, len, "yes")) {
++b; // accept the single 'y' character
}
break;
case 'n':
case 'N':
result = false;
if (!bool_str_cmp(&b, len, "no")) {
++b;
}
break;
case 't':
case 'T':
result = true;
if (!bool_str_cmp(&b, len, "true")) {
++b;
}
break;
case 'f':
case 'F':
result = false;
if (!bool_str_cmp(&b, len, "false")) {
++b;
}
break;
case 'o':
case 'O':
if (bool_str_cmp(&b, len, "on")) {
result = true;
} else if (bool_str_cmp(&b, len, "off")) {
result = false;
} else {
return makeUnexpected(ConversionCode::BOOL_INVALID_VALUE);
}
break;
default:
return makeUnexpected(ConversionCode::BOOL_INVALID_VALUE);
}
src->assign(b, e);
return result;
}
/**
* StringPiece to double, with progress information. Alters the
* StringPiece parameter to munch the already-parsed characters.
*/
template <class Tgt>
Expected<Tgt, ConversionCode> str_to_floating(StringPiece* src) noexcept {
using namespace double_conversion;
static StringToDoubleConverter
conv(StringToDoubleConverter::ALLOW_TRAILING_JUNK
| StringToDoubleConverter::ALLOW_LEADING_SPACES,
0.0,
// return this for junk input string
std::numeric_limits<double>::quiet_NaN(),
nullptr, nullptr);
if (src->empty()) {
return makeUnexpected(ConversionCode::EMPTY_INPUT_STRING);
}
int length;
auto result = conv.StringToDouble(src->data(),
static_cast<int>(src->size()),
&length); // processed char count
if (!std::isnan(result)) {
// If we get here with length = 0, the input string is empty.
// If we get here with result = 0.0, it's either because the string
// contained only whitespace, or because we had an actual zero value
// (with potential trailing junk). If it was only whitespace, we
// want to raise an error; length will point past the last character
// that was processed, so we need to check if that character was
// whitespace or not.
if (length == 0 || (result == 0.0 && std::isspace((*src)[length - 1]))) {
return makeUnexpected(ConversionCode::EMPTY_INPUT_STRING);
}
src->advance(length);
return result;
}
auto* e = src->end();
auto* b =
std::find_if_not(src->begin(), e, [](char c) { return std::isspace(c); });
// There must be non-whitespace, otherwise we would have caught this above
assert(b < e);
size_t size = e - b;
bool negative = false;
if (*b == '-') {
negative = true;
++b;
--size;
}
result = 0.0;
switch (tolower_ascii(*b)) {
case 'i':
if (size >= 3 && tolower_ascii(b[1]) == 'n' &&
tolower_ascii(b[2]) == 'f') {
if (size >= 8 && tolower_ascii(b[3]) == 'i' &&
tolower_ascii(b[4]) == 'n' && tolower_ascii(b[5]) == 'i' &&
tolower_ascii(b[6]) == 't' && tolower_ascii(b[7]) == 'y') {
b += 8;
} else {
b += 3;
}
result = std::numeric_limits<Tgt>::infinity();
}
break;
case 'n':
if (size >= 3 && tolower_ascii(b[1]) == 'a' &&
tolower_ascii(b[2]) == 'n') {
b += 3;
result = std::numeric_limits<Tgt>::quiet_NaN();
}
break;
default:
break;
}
if (result == 0.0) {
// All bets are off
return makeUnexpected(ConversionCode::STRING_TO_FLOAT_ERROR);
}
if (negative) {
result = -result;
}
src->assign(b, e);
return result;
}
template Expected<float, ConversionCode> str_to_floating<float>(
StringPiece* src) noexcept;
template Expected<double, ConversionCode> str_to_floating<double>(
StringPiece* src) noexcept;
/**
* This class takes care of additional processing needed for signed values,
* like leading sign character and overflow checks.
*/
template <typename T, bool IsSigned = std::is_signed<T>::value>
class SignedValueHandler;
template <typename T>
class SignedValueHandler<T, true> {
public:
ConversionCode init(const char*& b) {
negative_ = false;
if (!std::isdigit(*b)) {
if (*b == '-') {
negative_ = true;
} else if (UNLIKELY(*b != '+')) {
return ConversionCode::INVALID_LEADING_CHAR;
}
++b;
}
return ConversionCode::SUCCESS;
}
ConversionCode overflow() {
return negative_ ? ConversionCode::NEGATIVE_OVERFLOW
: ConversionCode::POSITIVE_OVERFLOW;
}
template <typename U>
Expected<T, ConversionCode> finalize(U value) {
T rv;
if (negative_) {
rv = -value;
if (UNLIKELY(rv > 0)) {
return makeUnexpected(ConversionCode::NEGATIVE_OVERFLOW);
}
} else {
rv = value;
if (UNLIKELY(rv < 0)) {
return makeUnexpected(ConversionCode::POSITIVE_OVERFLOW);
}
}
return rv;
}
private:
bool negative_;
};
// For unsigned types, we don't need any extra processing
template <typename T>
class SignedValueHandler<T, false> {
public:
ConversionCode init(const char*&) {
return ConversionCode::SUCCESS;
}
ConversionCode overflow() {
return ConversionCode::POSITIVE_OVERFLOW;
}
Expected<T, ConversionCode> finalize(T value) {
return value;
}
};
/**
* String represented as a pair of pointers to char to signed/unsigned
* integrals. Assumes NO whitespace before or after, and also that the
* string is composed entirely of digits (and an optional sign only for
* signed types). String may be empty, in which case digits_to returns
* an appropriate error.
*/
template <class Tgt>
inline Expected<Tgt, ConversionCode> digits_to(
const char* b,
const char* const e) noexcept {
using UT = typename std::make_unsigned<Tgt>::type;
assert(b <= e);
SignedValueHandler<Tgt> sgn;
auto err = sgn.init(b);
if (UNLIKELY(err != ConversionCode::SUCCESS)) {
return makeUnexpected(err);
}
size_t size = e - b;
/* Although the string is entirely made of digits, we still need to
* check for overflow.
*/
if (size > std::numeric_limits<UT>::digits10) {
// Leading zeros?
if (b < e && *b == '0') {
for (++b;; ++b) {
if (b == e) {
return Tgt(0); // just zeros, e.g. "0000"
}
if (*b != '0') {
size = e - b;
break;
}
}
}
if (size > std::numeric_limits<UT>::digits10 &&
(size != std::numeric_limits<UT>::digits10 + 1 ||
strncmp(b, MaxString<UT>::value, size) > 0)) {
return makeUnexpected(sgn.overflow());
}
}
// Here we know that the number won't overflow when
// converted. Proceed without checks.
UT result = 0;
for (; e - b >= 4; b += 4) {
result *= 10000;
const int32_t r0 = shift1000[static_cast<size_t>(b[0])];
const int32_t r1 = shift100[static_cast<size_t>(b[1])];
const int32_t r2 = shift10[static_cast<size_t>(b[2])];
const int32_t r3 = shift1[static_cast<size_t>(b[3])];
const auto sum = r0 + r1 + r2 + r3;
if (sum >= OOR) {
goto outOfRange;
}
result += sum;
}
switch (e - b) {
case 3: {
const int32_t r0 = shift100[static_cast<size_t>(b[0])];
const int32_t r1 = shift10[static_cast<size_t>(b[1])];
const int32_t r2 = shift1[static_cast<size_t>(b[2])];
const auto sum = r0 + r1 + r2;
if (sum >= OOR) {
goto outOfRange;
}
result = 1000 * result + sum;
break;
}
case 2: {
const int32_t r0 = shift10[static_cast<size_t>(b[0])];
const int32_t r1 = shift1[static_cast<size_t>(b[1])];
const auto sum = r0 + r1;
if (sum >= OOR) {
goto outOfRange;
}
result = 100 * result + sum;
break;
}
case 1: {
const int32_t sum = shift1[static_cast<size_t>(b[0])];
if (sum >= OOR) {
goto outOfRange;
}
result = 10 * result + sum;
break;
}
default:
assert(b == e);
if (size == 0) {
return makeUnexpected(ConversionCode::NO_DIGITS);
}
break;
}
return sgn.finalize(result);
outOfRange:
return makeUnexpected(ConversionCode::NON_DIGIT_CHAR);
}
template Expected<char, ConversionCode> digits_to<char>(
const char*,
const char*) noexcept;
template Expected<signed char, ConversionCode> digits_to<signed char>(
const char*,
const char*) noexcept;
template Expected<unsigned char, ConversionCode> digits_to<unsigned char>(
const char*,
const char*) noexcept;
template Expected<short, ConversionCode> digits_to<short>(
const char*,
const char*) noexcept;
template Expected<unsigned short, ConversionCode> digits_to<unsigned short>(
const char*,
const char*) noexcept;
template Expected<int, ConversionCode> digits_to<int>(
const char*,
const char*) noexcept;
template Expected<unsigned int, ConversionCode> digits_to<unsigned int>(
const char*,
const char*) noexcept;
template Expected<long, ConversionCode> digits_to<long>(
const char*,
const char*) noexcept;
template Expected<unsigned long, ConversionCode> digits_to<unsigned long>(
const char*,
const char*) noexcept;
template Expected<long long, ConversionCode> digits_to<long long>(
const char*,
const char*) noexcept;
template Expected<unsigned long long, ConversionCode>
digits_to<unsigned long long>(const char*, const char*) noexcept;
#if FOLLY_HAVE_INT128_T
template Expected<__int128, ConversionCode> digits_to<__int128>(
const char*,
const char*) noexcept;
template Expected<unsigned __int128, ConversionCode>
digits_to<unsigned __int128>(const char*, const char*) noexcept;
#endif
/**
* StringPiece to integrals, with progress information. Alters the
* StringPiece parameter to munch the already-parsed characters.
*/
template <class Tgt>
Expected<Tgt, ConversionCode> str_to_integral(StringPiece* src) noexcept {
using UT = typename std::make_unsigned<Tgt>::type;
auto b = src->data(), past = src->data() + src->size();
for (;; ++b) {
if (UNLIKELY(b >= past)) {
return makeUnexpected(ConversionCode::EMPTY_INPUT_STRING);
}
if (!std::isspace(*b)) {
break;
}
}
SignedValueHandler<Tgt> sgn;
auto err = sgn.init(b);
if (UNLIKELY(err != ConversionCode::SUCCESS)) {
return makeUnexpected(err);
}
if (std::is_signed<Tgt>::value && UNLIKELY(b >= past)) {
return makeUnexpected(ConversionCode::NO_DIGITS);
}
if (UNLIKELY(!isdigit(*b))) {
return makeUnexpected(ConversionCode::NON_DIGIT_CHAR);
}
auto m = findFirstNonDigit(b + 1, past);
auto tmp = digits_to<UT>(b, m);
if (UNLIKELY(!tmp.hasValue())) {
return makeUnexpected(
tmp.error() == ConversionCode::POSITIVE_OVERFLOW ? sgn.overflow()
: tmp.error());
}
auto res = sgn.finalize(tmp.value());
if (res.hasValue()) {
src->advance(m - src->data());
}
return res;
}
template Expected<char, ConversionCode> str_to_integral<char>(
StringPiece* src) noexcept;
template Expected<signed char, ConversionCode> str_to_integral<signed char>(
StringPiece* src) noexcept;
template Expected<unsigned char, ConversionCode> str_to_integral<unsigned char>(
StringPiece* src) noexcept;
template Expected<short, ConversionCode> str_to_integral<short>(
StringPiece* src) noexcept;
template Expected<unsigned short, ConversionCode>
str_to_integral<unsigned short>(StringPiece* src) noexcept;
template Expected<int, ConversionCode> str_to_integral<int>(
StringPiece* src) noexcept;
template Expected<unsigned int, ConversionCode> str_to_integral<unsigned int>(
StringPiece* src) noexcept;
template Expected<long, ConversionCode> str_to_integral<long>(
StringPiece* src) noexcept;
template Expected<unsigned long, ConversionCode> str_to_integral<unsigned long>(
StringPiece* src) noexcept;
template Expected<long long, ConversionCode> str_to_integral<long long>(
StringPiece* src) noexcept;
template Expected<unsigned long long, ConversionCode>
str_to_integral<unsigned long long>(StringPiece* src) noexcept;
#if FOLLY_HAVE_INT128_T
template Expected<__int128, ConversionCode> str_to_integral<__int128>(
StringPiece* src) noexcept;
template Expected<unsigned __int128, ConversionCode>
str_to_integral<unsigned __int128>(StringPiece* src) noexcept;
#endif
} // namespace detail
ConversionError makeConversionError(ConversionCode code, StringPiece input) {
using namespace detail;
static_assert(
std::is_unsigned<std::underlying_type<ConversionCode>::type>::value,
"ConversionCode should be unsigned");
assert((std::size_t)code < kErrorStrings.size());
const ErrorString& err = kErrorStrings[(std::size_t)code];
if (code == ConversionCode::EMPTY_INPUT_STRING && input.empty()) {
return {err.string, code};
}
std::string tmp(err.string);
tmp.append(": ");
if (err.quote) {
tmp.append(1, '"');
}
if (input.size() > 0) {
tmp.append(input.data(), input.size());
}
if (err.quote) {
tmp.append(1, '"');
}
return {tmp, code};
}
} // namespace folly
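The Conv.h diff is suppressed above, but the routines in this file back Folly's string-conversion entry points; a minimal sketch, assuming the usual folly::to<>() API declared in Conv.h:

#include <folly/Conv.h>

#include <cstdio>
#include <string>

int main() {
  int n = folly::to<int>("123");        // str_to_integral under the hood
  double d = folly::to<double>("2.5");  // str_to_floating under the hood
  std::string s = folly::to<std::string>(n + 1);
  std::printf("%d %.1f %s\n", n, d, s.c_str());

  try {
    folly::to<int>("12ab");  // trailing junk -> expected to throw ConversionError
  } catch (const folly::ConversionError& e) {
    std::printf("error: %s\n", e.what());
  }
  return 0;
}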

1541
ios/Pods/Folly/folly/Conv.h generated Normal file

File diff suppressed because it is too large

77
ios/Pods/Folly/folly/CppAttributes.h generated Normal file
View File

@ -0,0 +1,77 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* GCC compatible wrappers around clang attributes.
*
* @author Dominik Gabi
*/
#pragma once
#ifndef __has_cpp_attribute
#define FOLLY_HAS_CPP_ATTRIBUTE(x) 0
#else
#define FOLLY_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
#endif
#ifndef __has_extension
#define FOLLY_HAS_EXTENSION(x) 0
#else
#define FOLLY_HAS_EXTENSION(x) __has_extension(x)
#endif
/**
* Fallthrough to indicate that `break` was left out on purpose in a switch
* statement, e.g.
*
* switch (n) {
* case 22:
* case 33: // no warning: no statements between case labels
* f();
* case 44: // warning: unannotated fall-through
* g();
* FOLLY_FALLTHROUGH; // no warning: annotated fall-through
* }
*/
#if FOLLY_HAS_CPP_ATTRIBUTE(clang::fallthrough)
#define FOLLY_FALLTHROUGH [[clang::fallthrough]]
#else
#define FOLLY_FALLTHROUGH
#endif
/**
* Nullable indicates that a return value or a parameter may be a `nullptr`,
* e.g.
*
* int* FOLLY_NULLABLE foo(int* a, int* FOLLY_NULLABLE b) {
* if (*a > 0) { // safe dereference
* return nullptr;
* }
* if (*b < 0) { // unsafe dereference
* return *a;
* }
* if (b != nullptr && *b == 1) { // safe checked dereference
* return new int(1);
* }
* return nullptr;
* }
*/
#if FOLLY_HAS_EXTENSION(nullability)
#define FOLLY_NULLABLE _Nullable
#else
#define FOLLY_NULLABLE
#endif

210
ios/Pods/Folly/folly/CpuId.h generated Normal file
View File

@ -0,0 +1,210 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint>
#include <folly/Portability.h>
#ifdef _MSC_VER
#include <intrin.h>
#endif
namespace folly {
/**
* Identification of an Intel CPU.
* Supports CPUID feature flags (EAX=1) and extended features (EAX=7, ECX=0).
* Values from http://www.intel.com/content/www/us/en/processors/processor-identification-cpuid-instruction-note.html
*/
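// Usage sketch (not part of the original header; the feature accessors such
// as avx2() are generated by the X/B/C/D macros further down in this class):
//   folly::CpuId id;
//   if (id.avx2()) {
//     // take an AVX2-optimized code path
//   }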
class CpuId {
public:
// Always inline in order for this to be usable from a __ifunc__.
// In shared library mode, a __ifunc__ runs at relocation time, while the
// PLT hasn't been fully populated yet; thus, ifuncs cannot use symbols
// with potentially external linkage. (This issue is less likely in opt
// mode since inlining is more likely to happen, and it doesn't happen for
// statically linked binaries which don't depend on the PLT)
FOLLY_ALWAYS_INLINE CpuId() {
#ifdef _MSC_VER
int reg[4];
__cpuid(static_cast<int*>(reg), 0);
const int n = reg[0];
if (n >= 1) {
__cpuid(static_cast<int*>(reg), 1);
f1c_ = reg[2];
f1d_ = reg[3];
}
if (n >= 7) {
__cpuidex(static_cast<int*>(reg), 7, 0);
f7b_ = reg[1];
f7c_ = reg[2];
}
#elif defined(__i386__) && defined(__PIC__) && !defined(__clang__) && \
defined(__GNUC__)
// The following block is like the normal cpuid branch below, but gcc
// reserves ebx for use as its PIC register, so we must specially
// handle the save and restore to avoid clobbering the register
uint32_t n;
__asm__(
"pushl %%ebx\n\t"
"cpuid\n\t"
"popl %%ebx\n\t"
: "=a"(n)
: "a"(0)
: "edx", "ecx");
if (n >= 1) {
__asm__(
"pushl %%ebx\n\t"
"cpuid\n\t"
"popl %%ebx\n\t"
: "=c"(f1c_), "=d"(f1d_)
: "a"(1)
:);
}
if (n >= 7) {
__asm__(
"pushl %%ebx\n\t"
"cpuid\n\t"
"movl %%ebx, %%eax\n\r"
"popl %%ebx"
: "=a"(f7b_), "=c"(f7c_)
: "a"(7), "c"(0)
: "edx");
}
#elif FOLLY_X64 || defined(__i386__)
uint32_t n;
__asm__("cpuid" : "=a"(n) : "a"(0) : "ebx", "edx", "ecx");
if (n >= 1) {
__asm__("cpuid" : "=c"(f1c_), "=d"(f1d_) : "a"(1) : "ebx");
}
if (n >= 7) {
__asm__("cpuid" : "=b"(f7b_), "=c"(f7c_) : "a"(7), "c"(0) : "edx");
}
#endif
}
#define X(name, r, bit) \
FOLLY_ALWAYS_INLINE bool name() const { \
return (r) & (1U << bit); \
}
// cpuid(1): Processor Info and Feature Bits.
#define C(name, bit) X(name, f1c_, bit)
C(sse3, 0)
C(pclmuldq, 1)
C(dtes64, 2)
C(monitor, 3)
C(dscpl, 4)
C(vmx, 5)
C(smx, 6)
C(eist, 7)
C(tm2, 8)
C(ssse3, 9)
C(cnxtid, 10)
C(fma, 12)
C(cx16, 13)
C(xtpr, 14)
C(pdcm, 15)
C(pcid, 17)
C(dca, 18)
C(sse41, 19)
C(sse42, 20)
C(x2apic, 21)
C(movbe, 22)
C(popcnt, 23)
C(tscdeadline, 24)
C(aes, 25)
C(xsave, 26)
C(osxsave, 27)
C(avx, 28)
C(f16c, 29)
C(rdrand, 30)
#undef C
#define D(name, bit) X(name, f1d_, bit)
D(fpu, 0)
D(vme, 1)
D(de, 2)
D(pse, 3)
D(tsc, 4)
D(msr, 5)
D(pae, 6)
D(mce, 7)
D(cx8, 8)
D(apic, 9)
D(sep, 11)
D(mtrr, 12)
D(pge, 13)
D(mca, 14)
D(cmov, 15)
D(pat, 16)
D(pse36, 17)
D(psn, 18)
D(clfsh, 19)
D(ds, 21)
D(acpi, 22)
D(mmx, 23)
D(fxsr, 24)
D(sse, 25)
D(sse2, 26)
D(ss, 27)
D(htt, 28)
D(tm, 29)
D(pbe, 31)
#undef D
// cpuid(7): Extended Features.
#define B(name, bit) X(name, f7b_, bit)
B(bmi1, 3)
B(hle, 4)
B(avx2, 5)
B(smep, 7)
B(bmi2, 8)
B(erms, 9)
B(invpcid, 10)
B(rtm, 11)
B(mpx, 14)
B(avx512f, 16)
B(avx512dq, 17)
B(rdseed, 18)
B(adx, 19)
B(smap, 20)
B(avx512ifma, 21)
B(pcommit, 22)
B(clflushopt, 23)
B(clwb, 24)
B(avx512pf, 26)
B(avx512er, 27)
B(avx512cd, 28)
B(sha, 29)
B(avx512bw, 30)
B(avx512vl, 31)
#undef B
#define C(name, bit) X(name, f7c_, bit)
C(prefetchwt1, 0)
C(avx512vbmi, 1)
#undef C
#undef X
private:
uint32_t f1c_ = 0;
uint32_t f1d_ = 0;
uint32_t f7b_ = 0;
uint32_t f7c_ = 0;
};
} // namespace folly

162
ios/Pods/Folly/folly/Demangle.cpp generated Normal file
View File

@ -0,0 +1,162 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <folly/Demangle.h>
#include <algorithm>
#include <string.h>
#include <folly/Malloc.h>
#include <folly/portability/Config.h>
#if FOLLY_HAVE_CPLUS_DEMANGLE_V3_CALLBACK
# include <cxxabi.h>
// From libiberty
//
// TODO(tudorb): Detect this with autoconf for the open-source version.
//
// __attribute__((__weak__)) doesn't work, because cplus_demangle_v3_callback
// is exported by an object file in libiberty.a, and the ELF spec says
// "The link editor does not extract archive members to resolve undefined weak
// symbols" (but, interestingly enough, will resolve undefined weak symbols
// with definitions from archive members that were extracted in order to
// resolve an undefined global (strong) symbol)
# ifndef DMGL_NO_OPTS
# define FOLLY_DEFINED_DMGL 1
# define DMGL_NO_OPTS 0 /* For readability... */
# define DMGL_PARAMS (1 << 0) /* Include function args */
# define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */
# define DMGL_JAVA (1 << 2) /* Demangle as Java rather than C++. */
# define DMGL_VERBOSE (1 << 3) /* Include implementation details. */
# define DMGL_TYPES (1 << 4) /* Also try to demangle type encodings. */
# define DMGL_RET_POSTFIX (1 << 5) /* Print function return types (when
present) after function signature */
# endif
extern "C" int cplus_demangle_v3_callback(
const char* mangled,
int options, // We use DMGL_PARAMS | DMGL_TYPES, aka 0x11
void (*callback)(const char*, size_t, void*),
void* arg);
#endif
namespace folly {
#if FOLLY_HAVE_CPLUS_DEMANGLE_V3_CALLBACK
fbstring demangle(const char* name) {
#ifdef FOLLY_DEMANGLE_MAX_SYMBOL_SIZE
// GCC's __cxa_demangle() uses on-stack data structures for the
// parser state which are linear in the number of components of the
// symbol. For extremely long symbols, this can cause a stack
// overflow. We set an arbitrary symbol length limit above which we
// just return the mangled name.
size_t mangledLen = strlen(name);
if (mangledLen > FOLLY_DEMANGLE_MAX_SYMBOL_SIZE) {
return fbstring(name, mangledLen);
}
#endif
int status;
size_t len = 0;
// malloc() memory for the demangled type name
char* demangled = abi::__cxa_demangle(name, nullptr, &len, &status);
if (status != 0) {
return name;
}
// len is the length of the buffer (including NUL terminator and maybe
// other junk)
return fbstring(demangled, strlen(demangled), len, AcquireMallocatedString());
}
namespace {
struct DemangleBuf {
char* dest;
size_t remaining;
size_t total;
};
void demangleCallback(const char* str, size_t size, void* p) {
DemangleBuf* buf = static_cast<DemangleBuf*>(p);
size_t n = std::min(buf->remaining, size);
memcpy(buf->dest, str, n);
buf->dest += n;
buf->remaining -= n;
buf->total += size;
}
} // namespace
size_t demangle(const char* name, char* out, size_t outSize) {
#ifdef FOLLY_DEMANGLE_MAX_SYMBOL_SIZE
size_t mangledLen = strlen(name);
if (mangledLen > FOLLY_DEMANGLE_MAX_SYMBOL_SIZE) {
if (outSize) {
size_t n = std::min(mangledLen, outSize - 1);
memcpy(out, name, n);
out[n] = '\0';
}
return mangledLen;
}
#endif
DemangleBuf dbuf;
dbuf.dest = out;
dbuf.remaining = outSize ? outSize - 1 : 0; // leave room for null term
dbuf.total = 0;
// Unlike most library functions, this returns 1 on success and 0 on failure
int status = cplus_demangle_v3_callback(
name,
DMGL_PARAMS | DMGL_ANSI | DMGL_TYPES,
demangleCallback,
&dbuf);
if (status == 0) { // failed, return original
return folly::strlcpy(out, name, outSize);
}
if (outSize != 0) {
*dbuf.dest = '\0';
}
return dbuf.total;
}
#else
fbstring demangle(const char* name) {
return name;
}
size_t demangle(const char* name, char* out, size_t outSize) {
return folly::strlcpy(out, name, outSize);
}
#endif
size_t strlcpy(char* dest, const char* const src, size_t size) {
size_t len = strlen(src);
if (size != 0) {
size_t n = std::min(len, size - 1); // always null terminate!
memcpy(dest, src, n);
dest[n] = '\0';
}
return len;
}
} // folly

65
ios/Pods/Folly/folly/Demangle.h generated Normal file
View File

@ -0,0 +1,65 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/FBString.h>
namespace folly {
/**
* Return the demangled (prettified) version of a C++ type.
*
* This function tries to produce a human-readable type, but the type name will
* be returned unchanged in case of error or if demangling isn't supported on
* your system.
*
* Use for debugging -- do not rely on demangle() returning anything useful.
*
* This function may allocate memory (and therefore throw std::bad_alloc).
*/
fbstring demangle(const char* name);
inline fbstring demangle(const std::type_info& type) {
return demangle(type.name());
}
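// Usage sketch (not part of the original header; assumes <vector> and RTTI):
//   folly::fbstring pretty = folly::demangle(typeid(std::vector<int>));
//   // yields something like "std::vector<int, std::allocator<int> >"
//   // when demangling is supported, or the mangled name otherwise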
/**
* Return the demangled (prettified) version of a C++ type in a user-provided
* buffer.
*
* The semantics are the same as for snprintf or strlcpy: bufSize is the size
* of the buffer, the string is always null-terminated, and the return value is
* the number of characters (not including the null terminator) that would have
* been written if the buffer was big enough. (So a return value >= bufSize
* indicates that the output was truncated)
*
* This function does not allocate memory and is async-signal-safe.
*
* Note that while the underlying function for the fbstring-returning demangle
* is somewhat standard (abi::__cxa_demangle, which uses malloc), the underlying
* function for this version is less so (cplus_demangle_v3_callback from
* libiberty), so it is possible for the fbstring version to work while this
* version returns the original, mangled name.
*/
size_t demangle(const char* name, char* buf, size_t bufSize);
inline size_t demangle(const std::type_info& type, char* buf, size_t bufSize) {
return demangle(type.name(), buf, bufSize);
}
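// Usage sketch for the buffer-based overload (not part of the original
// header; the buffer size is arbitrary):
//   char buf[256];
//   size_t needed = folly::demangle(typeid(std::vector<int>), buf, sizeof(buf));
//   // needed >= sizeof(buf) would indicate the output was truncated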
// glibc doesn't have strlcpy
size_t strlcpy(char* dest, const char* const src, size_t size);
}

218
ios/Pods/Folly/folly/DiscriminatedPtr.h generated Normal file
View File

@ -0,0 +1,218 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Discriminated pointer: Type-safe pointer to one of several types.
*
* Similar to boost::variant, but has no space overhead over a raw pointer, as
* it relies on the fact that (on x86_64) there are 16 unused bits in a
* pointer.
*
* @author Tudor Bosman (tudorb@fb.com)
*/
#pragma once
#include <limits>
#include <stdexcept>
#include <glog/logging.h>
#include <folly/Likely.h>
#include <folly/Portability.h>
#include <folly/detail/DiscriminatedPtrDetail.h>
#if !FOLLY_X64 && !FOLLY_A64 && !FOLLY_PPC64
# error "DiscriminatedPtr is x64, arm64 and ppc64 specific code."
#endif
namespace folly {
/**
* Discriminated pointer.
*
* Given a list of types, a DiscriminatedPtr<Types...> may point to an object
* of one of the given types, or may be empty. DiscriminatedPtr is type-safe:
* you may only get a pointer to the type that you put in, otherwise get
* throws an exception (and get_nothrow returns nullptr)
*
* This pointer does not do any kind of lifetime management -- it's not a
* "smart" pointer. You are responsible for deallocating any memory used
* to hold pointees, if necessary.
*/
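// Usage sketch (not part of the original header; assumes <string>):
//   int i = 42;
//   folly::DiscriminatedPtr<int, std::string> p(&i);
//   if (p.hasType<int>()) {
//     int* ip = p.get<int>();                          // ok, points at i
//   }
//   std::string* sp = p.get_nothrow<std::string>();    // nullptr: wrong type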
template <typename... Types>
class DiscriminatedPtr {
// <, not <=, as our indexes are 1-based (0 means "empty")
static_assert(sizeof...(Types) < std::numeric_limits<uint16_t>::max(),
"too many types");
public:
/**
* Create an empty DiscriminatedPtr.
*/
DiscriminatedPtr() : data_(0) {
}
/**
* Create a DiscriminatedPtr that points to an object of type T.
* Fails at compile time if T is not a valid type (listed in Types)
*/
template <typename T>
explicit DiscriminatedPtr(T* ptr) {
set(ptr, typeIndex<T>());
}
/**
* Set this DiscriminatedPtr to point to an object of type T.
* Fails at compile time if T is not a valid type (listed in Types)
*/
template <typename T>
void set(T* ptr) {
set(ptr, typeIndex<T>());
}
/**
* Get a pointer to the object that this DiscriminatedPtr points to, if it is
* of type T. Fails at compile time if T is not a valid type (listed in
* Types), and returns nullptr if this DiscriminatedPtr is empty or points to
* an object of a different type.
*/
template <typename T>
T* get_nothrow() noexcept {
void* p = LIKELY(hasType<T>()) ? ptr() : nullptr;
return static_cast<T*>(p);
}
template <typename T>
const T* get_nothrow() const noexcept {
const void* p = LIKELY(hasType<T>()) ? ptr() : nullptr;
return static_cast<const T*>(p);
}
/**
* Get a pointer to the object that this DiscriminatedPtr points to, if it is
* of type T. Fails at compile time if T is not a valid type (listed in
* Types), and throws std::invalid_argument if this DiscriminatedPtr is empty
* or points to an object of a different type.
*/
template <typename T>
T* get() {
if (UNLIKELY(!hasType<T>())) {
throw std::invalid_argument("Invalid type");
}
return static_cast<T*>(ptr());
}
template <typename T>
const T* get() const {
if (UNLIKELY(!hasType<T>())) {
throw std::invalid_argument("Invalid type");
}
return static_cast<const T*>(ptr());
}
/**
* Return true iff this DiscriminatedPtr is empty.
*/
bool empty() const {
return index() == 0;
}
/**
* Return true iff the object pointed to by this DiscriminatedPtr has type T,
* false otherwise. Fails at compile time if T is not a valid type (listed
* in Types...)
*/
template <typename T>
bool hasType() const {
return index() == typeIndex<T>();
}
/**
* Clear this DiscriminatedPtr, making it empty.
*/
void clear() {
data_ = 0;
}
/**
* Assignment operator from a pointer of type T.
*/
template <typename T>
DiscriminatedPtr& operator=(T* ptr) {
set(ptr);
return *this;
}
/**
* Apply a visitor to this object, calling the appropriate overload for
* the type currently stored in DiscriminatedPtr. Throws invalid_argument
* if the DiscriminatedPtr is empty.
*
* The visitor must meet the following requirements:
*
* - The visitor must allow invocation as a function by overloading
* operator(), unambiguously accepting all values of type T* (or const T*)
* for all T in Types...
* - All operations of the function object on T* (or const T*) must
* return the same type (or a static_assert will fire).
*/
template <typename V>
typename dptr_detail::VisitorResult<V, Types...>::type apply(V&& visitor) {
size_t n = index();
if (n == 0) throw std::invalid_argument("Empty DiscriminatedPtr");
return dptr_detail::ApplyVisitor<V, Types...>()(
n, std::forward<V>(visitor), ptr());
}
template <typename V>
typename dptr_detail::ConstVisitorResult<V, Types...>::type apply(V&& visitor)
const {
size_t n = index();
if (n == 0) throw std::invalid_argument("Empty DiscriminatedPtr");
return dptr_detail::ApplyConstVisitor<V, Types...>()(
n, std::forward<V>(visitor), ptr());
}
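// Usage sketch for apply() (hypothetical visitor; not part of the original
// header). All overloads of operator() must return the same type:
//   struct SizeVisitor {
//     size_t operator()(int* p) const { return sizeof(*p); }
//     size_t operator()(std::string* p) const { return p->size(); }
//   };
//   size_t n = p.apply(SizeVisitor());   // p as in the sketch above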
private:
/**
* Get the 1-based type index of T in Types.
*/
template <typename T>
size_t typeIndex() const {
return dptr_detail::GetTypeIndex<T, Types...>::value;
}
uint16_t index() const { return data_ >> 48; }
void* ptr() const {
return reinterpret_cast<void*>(data_ & ((1ULL << 48) - 1));
}
void set(void* p, uint16_t v) {
uintptr_t ip = reinterpret_cast<uintptr_t>(p);
CHECK(!(ip >> 48));
ip |= static_cast<uintptr_t>(v) << 48;
data_ = ip;
}
/**
* We store a pointer in the least significant 48 bits of data_, and a type
* index (0 = empty, or 1-based index in Types) in the most significant 16
* bits. We rely on the fact that pointers have their most significant 16
* bits clear on x86_64.
*/
uintptr_t data_;
};
} // namespace folly

355
ios/Pods/Folly/folly/DynamicConverter.h generated Normal file
View File

@ -0,0 +1,355 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// @author Nicholas Ormrod <njormrod@fb.com>
#pragma once
#include <folly/dynamic.h>
namespace folly {
template <typename T> T convertTo(const dynamic&);
template <typename T> dynamic toDynamic(const T&);
}
/**
* convertTo returns a well-typed representation of the input dynamic.
*
* Example:
*
* dynamic d = dynamic::array(
* dynamic::array(1, 2, 3),
* dynamic::array(4, 5)); // a vector of vector of int
* auto vvi = convertTo<fbvector<fbvector<int>>>(d);
*
* See docs/DynamicConverter.md for supported types and customization
*/
#include <type_traits>
#include <iterator>
#include <boost/iterator/iterator_adaptor.hpp>
#include <boost/mpl/has_xxx.hpp>
#include <folly/Likely.h>
namespace folly {
///////////////////////////////////////////////////////////////////////////////
// traits
namespace dynamicconverter_detail {
BOOST_MPL_HAS_XXX_TRAIT_DEF(value_type);
BOOST_MPL_HAS_XXX_TRAIT_DEF(iterator);
BOOST_MPL_HAS_XXX_TRAIT_DEF(mapped_type);
template <typename T> struct iterator_class_is_container {
typedef std::reverse_iterator<typename T::iterator> some_iterator;
enum { value = has_value_type<T>::value &&
std::is_constructible<T, some_iterator, some_iterator>::value };
};
template <typename T>
using class_is_container = typename
std::conditional<
has_iterator<T>::value,
iterator_class_is_container<T>,
std::false_type
>::type;
template <typename T> struct class_is_range {
enum { value = has_value_type<T>::value &&
has_iterator<T>::value };
};
template <typename T> struct is_container
: std::conditional<
std::is_class<T>::value,
class_is_container<T>,
std::false_type
>::type {};
template <typename T> struct is_range
: std::conditional<
std::is_class<T>::value,
class_is_range<T>,
std::false_type
>::type {};
template <typename T> struct is_map
: std::integral_constant<
bool,
is_range<T>::value && has_mapped_type<T>::value
> {};
} // namespace dynamicconverter_detail
///////////////////////////////////////////////////////////////////////////////
// custom iterators
/**
* We have iterators that dereference to dynamics, but need iterators
* that dereference to typename T.
*
* Implementation details:
* 1. We cache the value of the dereference operator. This is necessary
* because boost::iterator_adaptor requires *it to return a
* reference.
* 2. For const reasons, we cannot call operator= to refresh the
* cache: we must call the destructor then placement new.
*/
namespace dynamicconverter_detail {
template<typename T>
struct Dereferencer {
static inline void derefToCache(
T* /* mem */, const dynamic::const_item_iterator& /* it */) {
throw TypeError("array", dynamic::Type::OBJECT);
}
static inline void derefToCache(T* mem, const dynamic::const_iterator& it) {
new (mem) T(convertTo<T>(*it));
}
};
template<typename F, typename S>
struct Dereferencer<std::pair<F, S>> {
static inline void
derefToCache(std::pair<F, S>* mem, const dynamic::const_item_iterator& it) {
new (mem) std::pair<F, S>(
convertTo<F>(it->first), convertTo<S>(it->second)
);
}
// Intentional duplication of the code in Dereferencer
template <typename T>
static inline void derefToCache(T* mem, const dynamic::const_iterator& it) {
new (mem) T(convertTo<T>(*it));
}
};
template <typename T, typename It>
class Transformer : public boost::iterator_adaptor<
Transformer<T, It>,
It,
typename T::value_type
> {
friend class boost::iterator_core_access;
typedef typename T::value_type ttype;
mutable ttype cache_;
mutable bool valid_;
void increment() {
++this->base_reference();
valid_ = false;
}
ttype& dereference() const {
if (LIKELY(!valid_)) {
cache_.~ttype();
Dereferencer<ttype>::derefToCache(&cache_, this->base_reference());
valid_ = true;
}
return cache_;
}
public:
explicit Transformer(const It& it)
: Transformer::iterator_adaptor_(it), valid_(false) {}
};
// conversion factory
template <typename T, typename It>
inline std::move_iterator<Transformer<T, It>>
conversionIterator(const It& it) {
return std::make_move_iterator(Transformer<T, It>(it));
}
} // namespace dynamicconverter_detail
///////////////////////////////////////////////////////////////////////////////
// DynamicConverter specializations
/**
* Each specialization of DynamicConverter has the function
* 'static T convert(const dynamic&);'
*/
// default - intentionally unimplemented
template <typename T, typename Enable = void> struct DynamicConverter;
// boolean
template <>
struct DynamicConverter<bool> {
static bool convert(const dynamic& d) {
return d.asBool();
}
};
// integrals
template <typename T>
struct DynamicConverter<T,
typename std::enable_if<std::is_integral<T>::value &&
!std::is_same<T, bool>::value>::type> {
static T convert(const dynamic& d) {
return folly::to<T>(d.asInt());
}
};
// enums
template <typename T>
struct DynamicConverter<T,
typename std::enable_if<std::is_enum<T>::value>::type> {
static T convert(const dynamic& d) {
using type = typename std::underlying_type<T>::type;
return static_cast<T>(DynamicConverter<type>::convert(d));
}
};
// floating point
template <typename T>
struct DynamicConverter<T,
typename std::enable_if<std::is_floating_point<T>::value>::type> {
static T convert(const dynamic& d) {
return folly::to<T>(d.asDouble());
}
};
// fbstring
template <>
struct DynamicConverter<folly::fbstring> {
static folly::fbstring convert(const dynamic& d) {
return d.asString();
}
};
// std::string
template <>
struct DynamicConverter<std::string> {
static std::string convert(const dynamic& d) {
return d.asString();
}
};
// std::pair
template <typename F, typename S>
struct DynamicConverter<std::pair<F,S>> {
static std::pair<F, S> convert(const dynamic& d) {
if (d.isArray() && d.size() == 2) {
return std::make_pair(convertTo<F>(d[0]), convertTo<S>(d[1]));
} else if (d.isObject() && d.size() == 1) {
auto it = d.items().begin();
return std::make_pair(convertTo<F>(it->first), convertTo<S>(it->second));
} else {
throw TypeError("array (size 2) or object (size 1)", d.type());
}
}
};
// containers
template <typename C>
struct DynamicConverter<C,
typename std::enable_if<
dynamicconverter_detail::is_container<C>::value>::type> {
static C convert(const dynamic& d) {
if (d.isArray()) {
return C(dynamicconverter_detail::conversionIterator<C>(d.begin()),
dynamicconverter_detail::conversionIterator<C>(d.end()));
} else if (d.isObject()) {
return C(dynamicconverter_detail::conversionIterator<C>
(d.items().begin()),
dynamicconverter_detail::conversionIterator<C>
(d.items().end()));
} else {
throw TypeError("object or array", d.type());
}
}
};
///////////////////////////////////////////////////////////////////////////////
// DynamicConstructor specializations
/**
* Each specialization of DynamicConstructor has the function
* 'static dynamic construct(const C&);'
*/
// default
template <typename C, typename Enable = void>
struct DynamicConstructor {
static dynamic construct(const C& x) {
return dynamic(x);
}
};
// maps
template<typename C>
struct DynamicConstructor<C,
typename std::enable_if<
dynamicconverter_detail::is_map<C>::value>::type> {
static dynamic construct(const C& x) {
dynamic d = dynamic::object;
for (auto& pair : x) {
d.insert(toDynamic(pair.first), toDynamic(pair.second));
}
return d;
}
};
// other ranges
template<typename C>
struct DynamicConstructor<C,
typename std::enable_if<
!dynamicconverter_detail::is_map<C>::value &&
!std::is_constructible<StringPiece, const C&>::value &&
dynamicconverter_detail::is_range<C>::value>::type> {
static dynamic construct(const C& x) {
dynamic d = dynamic::array;
for (auto& item : x) {
d.push_back(toDynamic(item));
}
return d;
}
};
// pair
template<typename A, typename B>
struct DynamicConstructor<std::pair<A, B>, void> {
static dynamic construct(const std::pair<A, B>& x) {
dynamic d = dynamic::array;
d.push_back(toDynamic(x.first));
d.push_back(toDynamic(x.second));
return d;
}
};
///////////////////////////////////////////////////////////////////////////////
// implementation
template <typename T>
T convertTo(const dynamic& d) {
return DynamicConverter<typename std::remove_cv<T>::type>::convert(d);
}
template<typename T>
dynamic toDynamic(const T& x) {
return DynamicConstructor<typename std::remove_cv<T>::type>::construct(x);
}
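// Usage sketch (not part of the original header; assumes <vector> and <map>):
//   std::vector<int> v{1, 2, 3};
//   folly::dynamic d = folly::toDynamic(v);             // [1, 2, 3]
//   auto v2 = folly::convertTo<std::vector<int>>(d);    // back to a vector
//   std::map<std::string, int> m{{"a", 1}};
//   folly::dynamic dm = folly::toDynamic(m);            // {"a": 1}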
} // namespace folly

165
ios/Pods/Folly/folly/Enumerate.h generated Normal file
View File

@ -0,0 +1,165 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <iterator>
#include <memory>
#include <folly/portability/SysTypes.h>
/*
* Similar to Python's enumerate(), folly::enumerate() can be used to
* iterate a range with a for-range loop, and it also allows you to
* retrieve the count of iterations so far.
*
* For example:
*
* for (auto it : folly::enumerate(vec)) {
* // *it is a reference to the current element. Const if vec is const.
* // it->member can be used as well.
* // it.index contains the iteration count.
* }
*
* If the iteration variable is const, the reference is too.
*
* for (const auto it : folly::enumerate(vec)) {
* // *it is always a const reference.
* }
*
* @author Giuseppe Ottaviano <ott@fb.com>
*/
namespace folly {
namespace detail {
template <class T>
struct MakeConst {
using type = const T;
};
template <class T>
struct MakeConst<T&> {
using type = const T&;
};
template <class T>
struct MakeConst<T*> {
using type = const T*;
};
// Raw pointers don't have an operator->() member function, so the
// second overload will be SFINAEd out in that case. Otherwise, the
// second is preferred in the partial order for getPointer(_, 0).
template <class Iterator>
auto getPointer(const Iterator& it, long) -> decltype(std::addressof(*it)) {
return std::addressof(*it);
}
template <class Iterator>
auto getPointer(const Iterator& it, int) -> decltype(it.operator->()) {
return it.operator->();
}
template <class Iterator>
class Enumerator {
public:
explicit Enumerator(Iterator it) : it_(std::move(it)) {}
class Proxy {
public:
using difference_type = ssize_t;
using value_type = typename std::iterator_traits<Iterator>::value_type;
using reference = typename std::iterator_traits<Iterator>::reference;
using pointer = typename std::iterator_traits<Iterator>::pointer;
using iterator_category = std::input_iterator_tag;
explicit Proxy(const Enumerator* e) : it_(e->it_), index(e->idx_) {}
// Non-const Proxy: Forward constness from Iterator.
reference operator*() {
return *it_;
}
pointer operator->() {
return getPointer(it_, 0);
}
// Const Proxy: Force const references.
typename MakeConst<reference>::type operator*() const {
return *it_;
}
typename MakeConst<pointer>::type operator->() const {
return getPointer(it_, 0);
}
private:
const Iterator& it_;
public:
const size_t index;
};
Proxy operator*() const {
return Proxy(this);
}
Enumerator& operator++() {
++it_;
++idx_;
return *this;
}
template <typename OtherIterator>
bool operator==(const Enumerator<OtherIterator>& rhs) {
return it_ == rhs.it_;
}
template <typename OtherIterator>
bool operator!=(const Enumerator<OtherIterator>& rhs) {
return !(*this == rhs);
}
private:
template <typename OtherIterator>
friend class Enumerator;
Iterator it_;
size_t idx_ = 0;
};
template <class Range>
class RangeEnumerator {
Range r_;
using BeginIteratorType = decltype(std::declval<Range>().begin());
using EndIteratorType = decltype(std::declval<Range>().end());
public:
explicit RangeEnumerator(Range&& r) : r_(std::forward<Range>(r)) {}
Enumerator<BeginIteratorType> begin() {
return Enumerator<BeginIteratorType>(r_.begin());
}
Enumerator<EndIteratorType> end() {
return Enumerator<EndIteratorType>(r_.end());
}
};
} // namespace detail
template <class Range>
detail::RangeEnumerator<Range> enumerate(Range&& r) {
return detail::RangeEnumerator<Range>(std::forward<Range>(r));
}
} // namespace folly

494
ios/Pods/Folly/folly/EvictingCacheMap.h generated Normal file
View File

@ -0,0 +1,494 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <algorithm>
#include <exception>
#include <functional>
#include <boost/utility.hpp>
#include <boost/intrusive/list.hpp>
#include <boost/intrusive/unordered_set.hpp>
#include <boost/iterator/iterator_adaptor.hpp>
#include <folly/portability/BitsFunctexcept.h>
namespace folly {
/**
* A general purpose LRU evicting cache. Designed to support constant time
* set/get operations. It maintains a doubly linked list of items that are
* threaded through an index (a hash map). The access order is maintained
* on the list by moving an element to the front of the list on a get. New elements
* are added to the front of the list. The index size is set to half the
* capacity (setting capacity to 0 is a special case. see notes at the end of
* this section). So assuming uniform distribution of keys, set/get are both
* constant time operations.
*
* On reaching capacity limit, clearSize_ LRU items are evicted at a time. If
* a callback is specified with setPruneHook, it is invoked for each eviction.
*
* This is NOT a thread-safe implementation.
*
* Configurability: capacity of the cache, number of items to evict, eviction
* callback and the hasher to hash the keys can all be supplied by the caller.
*
* If at a given state, N1 - N6 are the nodes in MRU to LRU order and hashing
* to index keys as {(N1,N5)->H1, (N4,N2,N6)->H2, N3->Hi}, the datastructure
* layout is as below. N1 .. N6 is a list threaded through the hash.
* Assuming the number of nodes hashed to each index key is bounded, the
* following operations run in constant time.
* i) get computes the index key, walks the list of elements hashed to
* the key and moves it to the front of the list, if found.
* ii) set inserts a new node into the list and places the same node on to the
* list of elements hashing to the corresponding index key.
* iii) prune deletes nodes from the end of the list as well as from the index.
*
* +----+ +----+ +----+
* | H1 | <-> | N1 | <-> | N5 |
* +----+ +----+ +----+
* ^ ^ ^
* | ___/ \
* | / \
* |_ /________ \___
* / | \
* / | \
* v v v
* +----+ +----+ +----+ +----+
* | H2 | <-> | N4 | <-> | N2 | <-> | N6 |
* +----+ +----+ +----+ +----+
* . ^ ^
* . | |
* . | |
* . | _____|
* . | /
* v v
* +----+ +----+
* | Hi | <-> | N3 |
* +----+ +----+
*
* N.B 1 : Changing the capacity with setMaxSize does not change the index size
* and it could end up with too many elements indexed to the same slot in the index.
* The set/get performance will get worse in this case. So it is best to avoid
* resizing.
*
* N.B 2 : Setting capacity to 0, using setMaxSize or initialization, turns off
* evictions based on the size of the cache, making it an INFINITE size cache
* unless evictions of LRU items are triggered by calling prune() by clients
* (using their own eviction criteria).
*/
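// Usage sketch (not part of the original header; assumes <string>):
//   folly::EvictingCacheMap<std::string, int> cache(2 /* maxSize */);
//   cache.set("a", 1);
//   cache.set("b", 2);
//   cache.set("c", 3);               // over capacity: evicts the LRU entry "a"
//   bool hasA = cache.exists("a");   // false
//   int b = cache.get("b");          // found; "b" is promoted to MRU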
template <class TKey, class TValue, class THash = std::hash<TKey> >
class EvictingCacheMap : private boost::noncopyable {
private:
// typedefs for brevity
struct Node;
typedef boost::intrusive::link_mode<boost::intrusive::safe_link> link_mode;
typedef boost::intrusive::unordered_set<Node> NodeMap;
typedef boost::intrusive::list<Node> NodeList;
typedef std::pair<const TKey, TValue> TPair;
public:
typedef std::function<void(TKey, TValue&&)> PruneHookCall;
// iterator base : returns TPair on dereference
template <typename Value, typename TIterator>
class iterator_base
: public boost::iterator_adaptor<iterator_base<Value, TIterator>,
TIterator,
Value,
boost::bidirectional_traversal_tag > {
public:
iterator_base() {
}
explicit iterator_base(TIterator it)
: iterator_base::iterator_adaptor_(it) {
}
Value& dereference() const {
return this->base_reference()->pr;
}
};
// iterators
typedef iterator_base<
TPair, typename NodeList::iterator> iterator;
typedef iterator_base<
const TPair, typename NodeList::const_iterator> const_iterator;
typedef iterator_base<
TPair, typename NodeList::reverse_iterator> reverse_iterator;
typedef iterator_base<
const TPair,
typename NodeList::const_reverse_iterator> const_reverse_iterator;
/**
* Construct a EvictingCacheMap
* @param maxSize maximum size of the cache map. Once the map size exceeds
* maxSize, the map will begin to evict.
* @param clearSize the number of elements to clear at a time when the
* eviction size is reached.
*/
explicit EvictingCacheMap(std::size_t maxSize, std::size_t clearSize = 1)
: nIndexBuckets_(std::max(maxSize / 2, std::size_t(kMinNumIndexBuckets))),
indexBuckets_(new typename NodeMap::bucket_type[nIndexBuckets_]),
indexTraits_(indexBuckets_.get(), nIndexBuckets_),
index_(indexTraits_),
maxSize_(maxSize),
clearSize_(clearSize) { }
~EvictingCacheMap() {
setPruneHook(nullptr);
// ignore any potential exceptions from pruneHook_
pruneWithFailSafeOption(size(), nullptr, true);
}
/**
* Adjust the max size of EvictingCacheMap. Note that this does not update
* nIndexBuckets_ accordingly. This API can cause performance to get very
* bad, e.g., the nIndexBuckets_ is still 100 after maxSize is updated to 1M.
*
* Calling this function with an argument of 0 removes the limit on the cache
* size and elements are not evicted unless clients explicitly call prune.
*
* If you intend to resize dynamically using this, then picking an index size
* that works well and initializing with corresponding maxSize is the only
* reasonable option.
*
* @param maxSize new maximum size of the cache map.
* @param pruneHook callback to use on eviction.
*/
void setMaxSize(size_t maxSize, PruneHookCall pruneHook = nullptr) {
if (maxSize != 0 && maxSize < size()) {
// Prune the excess elements with our new constraints.
prune(std::max(size() - maxSize, clearSize_), pruneHook);
}
maxSize_ = maxSize;
}
size_t getMaxSize() const {
return maxSize_;
}
void setClearSize(size_t clearSize) {
clearSize_ = clearSize;
}
/**
* Check for existence of a specific key in the map. This operation has
* no effect on LRU order.
* @param key key to search for
* @return true if exists, false otherwise
*/
bool exists(const TKey& key) const {
return findInIndex(key) != index_.end();
}
/**
* Get the value associated with a specific key. This function always
* promotes a found value to the head of the LRU.
* @param key key associated with the value
* @return the value if it exists
* @throw std::out_of_range exception if the key does not exist
*/
TValue& get(const TKey& key) {
auto it = find(key);
if (it == end()) {
std::__throw_out_of_range("Key does not exist");
}
return it->second;
}
/**
* Get the iterator associated with a specific key. This function always
* promotes a found value to the head of the LRU.
* @param key key to associate with value
* @return the iterator of the object (a std::pair of const TKey, TValue) or
* end() if it does not exist
*/
iterator find(const TKey& key) {
auto it = findInIndex(key);
if (it == index_.end()) {
return end();
}
lru_.erase(lru_.iterator_to(*it));
lru_.push_front(*it);
return iterator(lru_.iterator_to(*it));
}
/**
* Get the value associated with a specific key. This function never
* promotes a found value to the head of the LRU.
* @param key key associated with the value
* @return the value if it exists
* @throw std::out_of_range exception if the key does not exist
*/
const TValue& getWithoutPromotion(const TKey& key) const {
auto it = findWithoutPromotion(key);
if (it == end()) {
std::__throw_out_of_range("Key does not exist");
}
return it->second;
}
TValue& getWithoutPromotion(const TKey& key) {
auto const& cThis = *this;
return const_cast<TValue&>(cThis.getWithoutPromotion(key));
}
/**
* Get the iterator associated with a specific key. This function never
* promotes a found value to the head of the LRU.
* @param key key to associate with value
* @return the iterator of the object (a std::pair of const TKey, TValue) or
* end() if it does not exist
*/
const_iterator findWithoutPromotion(const TKey& key) const {
auto it = findInIndex(key);
return (it == index_.end()) ? end() : const_iterator(lru_.iterator_to(*it));
}
iterator findWithoutPromotion(const TKey& key) {
auto it = findInIndex(key);
return (it == index_.end()) ? end() : iterator(lru_.iterator_to(*it));
}
/**
* Erase the key-value pair associated with key if it exists.
* @param key key associated with the value
* @return true if the key existed and was erased, else false
*/
bool erase(const TKey& key) {
auto it = findInIndex(key);
if (it == index_.end()) {
return false;
}
auto node = &(*it);
std::unique_ptr<Node> nptr(node);
lru_.erase(lru_.iterator_to(*node));
index_.erase(it);
return true;
}
/**
* Set a key-value pair in the dictionary
* @param key key to associate with value
* @param value value to associate with the key
* @param promote boolean flag indicating whether or not to move something
* to the front of an LRU. This only really matters if you're setting
* a value that already exists.
* @param pruneHook callback to use on eviction (if it occurs).
*/
void set(const TKey& key,
TValue value,
bool promote = true,
PruneHookCall pruneHook = nullptr) {
auto it = findInIndex(key);
if (it != index_.end()) {
it->pr.second = std::move(value);
if (promote) {
lru_.erase(lru_.iterator_to(*it));
lru_.push_front(*it);
}
} else {
auto node = new Node(key, std::move(value));
index_.insert(*node);
lru_.push_front(*node);
// no evictions if maxSize_ is 0 i.e. unlimited capacity
if (maxSize_ > 0 && size() > maxSize_) {
prune(clearSize_, pruneHook);
}
}
}
/**
* Get the number of elements in the dictionary
* @return the size of the dictionary
*/
std::size_t size() const {
return index_.size();
}
/**
* Typical empty function
* @return true if empty, false otherwise
*/
bool empty() const {
return index_.empty();
}
void clear(PruneHookCall pruneHook = nullptr) {
prune(size(), pruneHook);
}
/**
* Set the prune hook, which is the function invoked on the key and value
* on each eviction. Will throw if the pruneHook throws, unless the
* EvictingCacheMap object is being destroyed, in which case it will
* be ignored.
* @param pruneHook new callback to use on eviction.
*/
void setPruneHook(PruneHookCall pruneHook) {
pruneHook_ = pruneHook;
}
/**
* Prune the minimum of pruneSize and size() from the back of the LRU.
* Will throw if pruneHook throws.
* @param pruneSize minimum number of elements to prune
* @param pruneHook a custom pruneHook function
*/
void prune(std::size_t pruneSize, PruneHookCall pruneHook = nullptr) {
// do not swallow exceptions for prunes not triggered from destructor
pruneWithFailSafeOption(pruneSize, pruneHook, false);
}
// Iterators and such
iterator begin() {
return iterator(lru_.begin());
}
iterator end() {
return iterator(lru_.end());
}
const_iterator begin() const {
return const_iterator(lru_.begin());
}
const_iterator end() const {
return const_iterator(lru_.end());
}
const_iterator cbegin() const {
return const_iterator(lru_.cbegin());
}
const_iterator cend() const {
return const_iterator(lru_.cend());
}
reverse_iterator rbegin() {
return reverse_iterator(lru_.rbegin());
}
reverse_iterator rend() {
return reverse_iterator(lru_.rend());
}
const_reverse_iterator rbegin() const {
return const_reverse_iterator(lru_.rbegin());
}
const_reverse_iterator rend() const {
return const_reverse_iterator(lru_.rend());
}
const_reverse_iterator crbegin() const {
return const_reverse_iterator(lru_.crbegin());
}
const_reverse_iterator crend() const {
return const_reverse_iterator(lru_.crend());
}
private:
struct Node
: public boost::intrusive::unordered_set_base_hook<link_mode>,
public boost::intrusive::list_base_hook<link_mode> {
Node(const TKey& key, TValue&& value)
: pr(std::make_pair(key, std::move(value))) {
}
TPair pr;
friend bool operator==(const Node& lhs, const Node& rhs) {
return lhs.pr.first == rhs.pr.first;
}
friend std::size_t hash_value(const Node& node) {
return THash()(node.pr.first);
}
};
struct KeyHasher {
std::size_t operator()(const Node& node) {
return THash()(node.pr.first);
}
std::size_t operator()(const TKey& key) {
return THash()(key);
}
};
struct KeyValueEqual {
bool operator()(const TKey& lhs, const Node& rhs) {
return lhs == rhs.pr.first;
}
bool operator()(const Node& lhs, const TKey& rhs) {
return lhs.pr.first == rhs;
}
};
/**
* Get the iterator in the index associated with a specific key. This is
* merely a search in the index and does not promote the object.
* @param key key to associate with value
* @return the NodeMap::iterator to the Node containing the object
* (a std::pair of const TKey, TValue) or index_.end() if it does not exist
*/
typename NodeMap::iterator findInIndex(const TKey& key) {
return index_.find(key, KeyHasher(), KeyValueEqual());
}
typename NodeMap::const_iterator findInIndex(const TKey& key) const {
return index_.find(key, KeyHasher(), KeyValueEqual());
}
/**
* Prune the minimum of pruneSize and size() from the back of the LRU.
* @param pruneSize minimum number of elements to prune
* @param pruneHook a custom pruneHook function
* @param failSafe true if exceptions are to be ignored, false by default
*/
void pruneWithFailSafeOption(std::size_t pruneSize,
PruneHookCall pruneHook, bool failSafe) {
auto& ph = (nullptr == pruneHook) ? pruneHook_ : pruneHook;
for (std::size_t i = 0; i < pruneSize && !lru_.empty(); i++) {
auto *node = &(*lru_.rbegin());
std::unique_ptr<Node> nptr(node);
lru_.erase(lru_.iterator_to(*node));
index_.erase(index_.iterator_to(*node));
if (ph) {
try {
ph(node->pr.first, std::move(node->pr.second));
} catch (...) {
if (!failSafe) {
throw;
}
}
}
}
}
static const std::size_t kMinNumIndexBuckets = 100;
PruneHookCall pruneHook_;
std::size_t nIndexBuckets_;
std::unique_ptr<typename NodeMap::bucket_type[]> indexBuckets_;
typename NodeMap::bucket_traits indexTraits_;
NodeMap index_;
NodeList lru_;
std::size_t maxSize_;
std::size_t clearSize_;
};
} // folly

120
ios/Pods/Folly/folly/Exception.h generated Normal file
View File

@ -0,0 +1,120 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <errno.h>
#include <cstdio>
#include <stdexcept>
#include <system_error>
#include <folly/Conv.h>
#include <folly/FBString.h>
#include <folly/Likely.h>
#include <folly/Portability.h>
namespace folly {
// Various helpers to throw appropriate std::system_error exceptions from C
// library errors (returned in errno, as positive return values (many POSIX
// functions), or as negative return values (Linux syscalls))
//
// The *Explicit functions take an explicit value for errno.
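// Usage sketch (not part of the original header; 'path' is hypothetical and
// <fcntl.h> is assumed for open()):
//   int fd = open(path, O_RDONLY);
//   folly::checkUnixError(fd, "failed to open ", path);
//   // throws std::system_error built from errno if open() returned -1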
// Helper to throw std::system_error
[[noreturn]] inline void throwSystemErrorExplicit(int err, const char* msg) {
throw std::system_error(err, std::system_category(), msg);
}
template <class... Args>
[[noreturn]] void throwSystemErrorExplicit(int err, Args&&... args) {
throwSystemErrorExplicit(
err, to<fbstring>(std::forward<Args>(args)...).c_str());
}
// Helper to throw std::system_error from errno and components of a string
template <class... Args>
[[noreturn]] void throwSystemError(Args&&... args) {
throwSystemErrorExplicit(errno, std::forward<Args>(args)...);
}
// Check a Posix return code (0 on success, error number on error), throw
// on error.
template <class... Args>
void checkPosixError(int err, Args&&... args) {
if (UNLIKELY(err != 0)) {
throwSystemErrorExplicit(err, std::forward<Args>(args)...);
}
}
// Check a Linux kernel-style return code (>= 0 on success, negative error
// number on error), throw on error.
template <class... Args>
void checkKernelError(ssize_t ret, Args&&... args) {
if (UNLIKELY(ret < 0)) {
throwSystemErrorExplicit(-ret, std::forward<Args>(args)...);
}
}
// Check a traditional Unix return code (-1 and sets errno on error), throw
// on error.
template <class... Args>
void checkUnixError(ssize_t ret, Args&&... args) {
if (UNLIKELY(ret == -1)) {
throwSystemError(std::forward<Args>(args)...);
}
}
template <class... Args>
void checkUnixErrorExplicit(ssize_t ret, int savedErrno, Args&&... args) {
if (UNLIKELY(ret == -1)) {
throwSystemErrorExplicit(savedErrno, std::forward<Args>(args)...);
}
}
// Check the return code from a fopen-style function (returns a non-nullptr
// FILE* on success, nullptr on error, sets errno). Works with fopen, fdopen,
// freopen, tmpfile, etc.
template <class... Args>
void checkFopenError(FILE* fp, Args&&... args) {
if (UNLIKELY(!fp)) {
throwSystemError(std::forward<Args>(args)...);
}
}
template <class... Args>
void checkFopenErrorExplicit(FILE* fp, int savedErrno, Args&&... args) {
if (UNLIKELY(!fp)) {
throwSystemErrorExplicit(savedErrno, std::forward<Args>(args)...);
}
}
template <typename E, typename V, typename... Args>
void throwOnFail(V&& value, Args&&... args) {
if (!value) {
throw E(std::forward<Args>(args)...);
}
}
/**
* If cond is not true, raise an exception of type E. E must have a ctor that
* works with const char* (a description of the failure).
*/
#define CHECK_THROW(cond, E) \
::folly::throwOnFail<E>((cond), "Check failed: " #cond)
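// Usage sketch (not part of the original header; 'config' is hypothetical):
//   CHECK_THROW(config != nullptr, std::invalid_argument);
//   // throws std::invalid_argument("Check failed: config != nullptr") if false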
} // namespace folly

69
ios/Pods/Folly/folly/ExceptionString.h generated Normal file
View File

@ -0,0 +1,69 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <exception>
#include <string>
#include <type_traits>
#include <folly/Demangle.h>
#include <folly/FBString.h>
#include <folly/Portability.h>
namespace folly {
/**
* Debug string for an exception: include type and what(), if
* defined.
*/
inline fbstring exceptionStr(const std::exception& e) {
#ifdef FOLLY_HAS_RTTI
fbstring rv(demangle(typeid(e)));
rv += ": ";
#else
fbstring rv("Exception (no RTTI available): ");
#endif
rv += e.what();
return rv;
}
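// Usage sketch (not part of the original header):
//   try {
//     throw std::runtime_error("boom");
//   } catch (const std::exception& e) {
//     folly::fbstring msg = folly::exceptionStr(e);
//     // "std::runtime_error: boom" when RTTI is available
//   }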
// Empirically, this indicates if the runtime supports
// std::exception_ptr, as not all (arm, for instance) do.
#if defined(__GNUC__) && defined(__GCC_ATOMIC_INT_LOCK_FREE) && \
__GCC_ATOMIC_INT_LOCK_FREE > 1
inline fbstring exceptionStr(std::exception_ptr ep) {
try {
std::rethrow_exception(ep);
} catch (const std::exception& e) {
return exceptionStr(e);
} catch (...) {
return "<unknown exception>";
}
}
#endif
template <typename E>
auto exceptionStr(const E& e) -> typename std::
enable_if<!std::is_base_of<std::exception, E>::value, fbstring>::type {
#ifdef FOLLY_HAS_RTTI
return demangle(typeid(e));
#else
return "Exception (no RTTI available)";
#endif
}
} // namespace folly

480
ios/Pods/Folly/folly/ExceptionWrapper.h generated Normal file
View File

@ -0,0 +1,480 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cassert>
#include <exception>
#include <iostream>
#include <memory>
#include <folly/ExceptionString.h>
#include <folly/detail/ExceptionWrapper.h>
namespace folly {
/*
* Throwing exceptions can be a convenient way to handle errors. Storing
* exceptions in an exception_ptr makes it easy to handle exceptions in a
* different thread or at a later time. exception_ptr can also be used in a very
* generic result/exception wrapper.
*
* However, there are some issues with throwing exceptions and
* std::exception_ptr. These issues revolve around throw being expensive,
* particularly in a multithreaded environment (see
* ExceptionWrapperBenchmark.cpp).
*
* Imagine we have a library that has an API which returns a result/exception
* wrapper. Let's consider some approaches for implementing this wrapper.
* First, we could store a std::exception. This approach loses the derived
* exception type, which can make exception handling more difficult for users
* that prefer rethrowing the exception. We could use a folly::dynamic for every
* possible type of exception. This is not very flexible - adding new types of
* exceptions requires a change to the result/exception wrapper. We could use an
* exception_ptr. However, constructing an exception_ptr as well as accessing
* the error requires a call to throw. That means that there will be two calls
* to throw in order to process the exception. For performance sensitive
* applications, this may be unacceptable.
*
* exception_wrapper is designed to handle exception management for both
* convenience and high performance use cases. make_exception_wrapper is
* templated on derived type, allowing us to rethrow the exception properly for
* users that prefer convenience. These explicitly named exception types can
* therefore be handled without any performance penalty. exception_wrapper is
* also flexible enough to accept any type. If a caught exception is not of an
* explicitly named type, then std::exception_ptr is used to preserve the
* exception state. For performance sensitive applications, the accessor methods
* can test or extract a pointer to a specific exception type with very little
* overhead.
*
* Example usage:
*
* exception_wrapper globalExceptionWrapper;
*
* // Thread1
* void doSomethingCrazy() {
* int rc = doSomethingCrazyWithLameReturnCodes();
* if (rc == NAILED_IT) {
* globalExceptionWrapper = exception_wrapper();
* } else if (rc == FACE_PLANT) {
* globalExceptionWrapper = make_exception_wrapper<FacePlantException>();
* } else if (rc == FAIL_WHALE) {
* globalExceptionWrapper = make_exception_wrapper<FailWhaleException>();
* }
* }
*
* // Thread2: Exceptions are ok!
* void processResult() {
* try {
* globalExceptionWrapper.throwException();
* } catch (const FacePlantException& e) {
* LOG(ERROR) << "FACEPLANT!";
* } catch (const FailWhaleException& e) {
* LOG(ERROR) << "FAILWHALE!";
* }
* }
*
* // Thread2: Exceptions are bad!
* void processResult() {
* globalExceptionWrapper.with_exception(
* [&](FacePlantException& faceplant) {
* LOG(ERROR) << "FACEPLANT";
* }) ||
* globalExceptionWrapper.with_exception(
* [&](FailWhaleException& failwhale) {
* LOG(ERROR) << "FAILWHALE!";
* }) ||
* LOG(FATAL) << "Unrecognized exception";
* }
*
*/
class exception_wrapper {
protected:
template <typename Ex>
struct optimize;
public:
exception_wrapper() = default;
// Implicitly construct an exception_wrapper from a qualifying exception.
// See the optimize struct for details.
template <typename Ex, typename =
typename std::enable_if<optimize<typename std::decay<Ex>::type>::value>
::type>
/* implicit */ exception_wrapper(Ex&& exn) {
typedef typename std::decay<Ex>::type DEx;
item_ = std::make_shared<DEx>(std::forward<Ex>(exn));
throwfn_ = folly::detail::Thrower<DEx>::doThrow;
}
// The following two constructors are meant to emulate the behavior of
// try_and_catch in performance sensitive code as well as to be flexible
// enough to wrap exceptions of unknown type. There is an overload that
// takes an exception reference so that the wrapper can extract and store
// the exception's type and what() when possible.
//
// The canonical use case is to construct an all-catching exception wrapper
// with minimal overhead like so:
//
// try {
// // some throwing code
// } catch (const std::exception& e) {
// // won't lose e's type and what()
// exception_wrapper ew{std::current_exception(), e};
// } catch (...) {
// // everything else
// exception_wrapper ew{std::current_exception()};
// }
//
// try_and_catch is cleaner and preferable. Use it unless you're sure you need
// something like this instead.
template <typename Ex>
explicit exception_wrapper(std::exception_ptr eptr, Ex& exn) {
assign_eptr(eptr, exn);
}
explicit exception_wrapper(std::exception_ptr eptr) {
assign_eptr(eptr);
}
// If the exception_wrapper does not contain an exception, std::terminate()
// is invoked to assure the [[noreturn]] behaviour.
[[noreturn]] void throwException() const {
if (throwfn_) {
throwfn_(item_.get());
} else if (eptr_) {
std::rethrow_exception(eptr_);
}
std::cerr
<< "Cannot use `throwException` with an empty folly::exception_wrapper"
<< std::endl;
std::terminate();
}
explicit operator bool() const {
return item_ || eptr_;
}
// This implementation is similar to std::exception_ptr's implementation
// where two exception_wrappers are equal when the address in the underlying
// reference field both point to the same exception object. The reference
// field remains the same when the exception_wrapper is copied or when
// the exception_wrapper is "rethrown".
bool operator==(const exception_wrapper& a) const {
if (item_) {
return a.item_ && item_.get() == a.item_.get();
} else {
return eptr_ == a.eptr_;
}
}
bool operator!=(const exception_wrapper& a) const {
return !(*this == a);
}
// This will return a non-nullptr only if the exception is held as a
// copy. It is the only interface which will distinguish between an
// exception held this way, and by exception_ptr. You probably
// shouldn't use it at all.
std::exception* getCopied() { return item_.get(); }
const std::exception* getCopied() const { return item_.get(); }
fbstring what() const {
if (item_) {
return exceptionStr(*item_);
} else if (eptr_) {
return estr_;
} else {
return fbstring();
}
}
fbstring class_name() const {
if (item_) {
auto& i = *item_;
return demangle(typeid(i));
} else if (eptr_) {
return ename_;
} else {
return fbstring();
}
}
template <class Ex>
bool is_compatible_with() const {
if (item_) {
return dynamic_cast<const Ex*>(item_.get());
} else if (eptr_) {
try {
std::rethrow_exception(eptr_);
} catch (typename std::decay<Ex>::type&) {
return true;
} catch (...) {
// fall through
}
}
return false;
}
template <class F>
bool with_exception(F&& f) {
using arg_type = typename functor_traits<F>::arg_type_decayed;
return with_exception<arg_type>(std::forward<F>(f));
}
template <class F>
bool with_exception(F&& f) const {
using arg_type = typename functor_traits<F>::arg_type_decayed;
return with_exception<const arg_type>(std::forward<F>(f));
}
// If this exception wrapper wraps an exception of type Ex, with_exception
// will call f with the wrapped exception as an argument and return true, and
// will otherwise return false.
template <class Ex, class F>
typename std::enable_if<
std::is_base_of<std::exception, typename std::decay<Ex>::type>::value,
bool>::type
with_exception(F f) {
return with_exception1<typename std::decay<Ex>::type>(f, this);
}
// Const overload
template <class Ex, class F>
typename std::enable_if<
std::is_base_of<std::exception, typename std::decay<Ex>::type>::value,
bool>::type
with_exception(F f) const {
return with_exception1<const typename std::decay<Ex>::type>(f, this);
}
// Overload for non-exceptions. Always rethrows.
template <class Ex, class F>
typename std::enable_if<
!std::is_base_of<std::exception, typename std::decay<Ex>::type>::value,
bool>::type
with_exception(F f) const {
try {
if (*this) {
throwException();
}
} catch (typename std::decay<Ex>::type& e) {
f(e);
return true;
} catch (...) {
// fall through
}
return false;
}
std::exception_ptr getExceptionPtr() const {
if (eptr_) {
return eptr_;
}
try {
if (*this) {
throwException();
}
} catch (...) {
return std::current_exception();
}
return std::exception_ptr();
}
protected:
template <typename Ex>
struct optimize {
static const bool value =
std::is_base_of<std::exception, Ex>::value &&
std::is_copy_assignable<Ex>::value &&
!std::is_abstract<Ex>::value;
};
template <typename Ex>
void assign_eptr(std::exception_ptr eptr, Ex& e) {
this->eptr_ = eptr;
this->estr_ = exceptionStr(e).toStdString();
this->ename_ = demangle(typeid(e)).toStdString();
}
void assign_eptr(std::exception_ptr eptr) {
this->eptr_ = eptr;
}
// Optimized case: if we know what type the exception is, we can
// store a copy of the concrete type, and a helper function so we
// can rethrow it.
std::shared_ptr<std::exception> item_;
void (*throwfn_)(std::exception*){nullptr};
// Fallback case: store the library wrapper, which is less efficient
// but gets the job done. Also store the result of what() and the name of the
// exception type, so we can at least get those back out without
// having to rethrow.
std::exception_ptr eptr_;
std::string estr_;
std::string ename_;
template <class T, class... Args>
friend exception_wrapper make_exception_wrapper(Args&&... args);
private:
template <typename F>
struct functor_traits {
template <typename T>
struct impl;
template <typename C, typename R, typename A>
struct impl<R(C::*)(A)> { using arg_type = A; };
template <typename C, typename R, typename A>
struct impl<R(C::*)(A) const> { using arg_type = A; };
using functor_decayed = typename std::decay<F>::type;
using functor_op = decltype(&functor_decayed::operator());
using arg_type = typename impl<functor_op>::arg_type;
using arg_type_decayed = typename std::decay<arg_type>::type;
};
// What makes this useful is that T can be exception_wrapper* or
// const exception_wrapper*, and the compiler will use the
// instantiation which works with F.
template <class Ex, class F, class T>
static bool with_exception1(F f, T* that) {
if (that->item_) {
if (auto ex = dynamic_cast<Ex*>(that->item_.get())) {
f(*ex);
return true;
}
} else if (that->eptr_) {
try {
std::rethrow_exception(that->eptr_);
} catch (Ex& e) {
f(e);
return true;
} catch (...) {
// fall through
}
}
return false;
}
};
template <class T, class... Args>
exception_wrapper make_exception_wrapper(Args&&... args) {
exception_wrapper ew;
ew.item_ = std::make_shared<T>(std::forward<Args>(args)...);
ew.throwfn_ = folly::detail::Thrower<T>::doThrow;
return ew;
}
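// A minimal usage sketch, assuming a MyException type derived from
// std::exception (the type and message below are illustrative only):
//
//   folly::exception_wrapper ew =
//       folly::make_exception_wrapper<MyException>("something failed");
//   if (ew) {
//     LOG(INFO) << ew.class_name() << ": " << ew.what();
//     // with_exception() returns true only if the wrapped type matches.
//     ew.with_exception([](const MyException& e) { /* handle it */ });
//   }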
// For consistency with exceptionStr() functions in String.h
inline fbstring exceptionStr(const exception_wrapper& ew) {
return ew.what();
}
/*
* try_and_catch is a simple replacement for try {} catch(){} that allows you to
* specify which derived exceptions you would like to catch and store in an
* exception_wrapper.
*
* Because we cannot build an equivalent of std::current_exception(), we need
* to catch every derived exception that we are interested in catching.
*
* Exceptions should be listed in the reverse order that you would write your
* catch statements (that is, std::exception& should be first).
*
* NOTE: Although implemented as a derived class (for syntactic delight), don't
* be confused - you should not pass around try_and_catch objects!
*
* Example Usage:
*
* // This catches my runtime_error and if I call throwException() on ew, it
* // will throw a runtime_error
* auto ew = folly::try_and_catch<std::exception, std::runtime_error>([=]() {
* if (badThingHappens()) {
* throw std::runtime_error("ZOMG!");
* }
* });
*
* // This will catch the exception and if I call throwException() on ew, it
* // will throw a std::exception
* auto ew = folly::try_and_catch<std::exception, std::runtime_error>([=]() {
* if (badThingHappens()) {
* throw std::exception();
* }
* });
*
* // This will not catch the exception and it will be thrown.
* auto ew = folly::try_and_catch<std::runtime_error>([=]() {
* if (badThingHappens()) {
* throw std::exception();
* }
* });
*/
template <typename... Exceptions>
class try_and_catch;
template <typename LastException, typename... Exceptions>
class try_and_catch<LastException, Exceptions...> :
public try_and_catch<Exceptions...> {
public:
template <typename F>
explicit try_and_catch(F&& fn) : Base() {
call_fn(fn);
}
protected:
typedef try_and_catch<Exceptions...> Base;
try_and_catch() : Base() {}
template <typename Ex>
typename std::enable_if<!exception_wrapper::optimize<Ex>::value>::type
assign_exception(Ex& e, std::exception_ptr eptr) {
exception_wrapper::assign_eptr(eptr, e);
}
template <typename Ex>
typename std::enable_if<exception_wrapper::optimize<Ex>::value>::type
assign_exception(Ex& e, std::exception_ptr /*eptr*/) {
this->item_ = std::make_shared<Ex>(e);
this->throwfn_ = folly::detail::Thrower<Ex>::doThrow;
}
template <typename F>
void call_fn(F&& fn) {
try {
Base::call_fn(std::move(fn));
} catch (LastException& e) {
if (typeid(e) == typeid(LastException&)) {
assign_exception(e, std::current_exception());
} else {
exception_wrapper::assign_eptr(std::current_exception(), e);
}
}
}
};
template<>
class try_and_catch<> : public exception_wrapper {
public:
try_and_catch() = default;
protected:
template <typename F>
void call_fn(F&& fn) {
fn();
}
};
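// A minimal sketch combining try_and_catch with with_exception to run code
// and handle a stored error later without rethrowing; mightThrow() is an
// illustrative function, not part of folly:
//
//   auto ew = folly::try_and_catch<std::exception, std::runtime_error>([&] {
//     mightThrow();
//   });
//   if (ew) {
//     bool handled = ew.with_exception(
//         [](const std::runtime_error& e) { LOG(ERROR) << e.what(); });
//     if (!handled) {
//       LOG(ERROR) << "some other exception: " << ew.what();
//     }
//   }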
}

66
ios/Pods/Folly/folly/Executor.h generated Normal file
View File

@ -0,0 +1,66 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <atomic>
#include <climits>
#include <functional>
#include <stdexcept>
#include <folly/Function.h>
namespace folly {
using Func = Function<void()>;
/// An Executor accepts units of work with add(), which should be
/// threadsafe.
class Executor {
public:
virtual ~Executor() = default;
/// Enqueue a function to be executed by this executor. This and all
/// variants must be threadsafe.
virtual void add(Func) = 0;
/// Enqueue a function with a given priority, where 0 is the medium priority.
/// This is up to the implementation to enforce.
virtual void addWithPriority(Func, int8_t /*priority*/) {
throw std::runtime_error(
"addWithPriority() is not implemented for this Executor");
}
virtual uint8_t getNumPriorities() const {
return 1;
}
static const int8_t LO_PRI = SCHAR_MIN;
static const int8_t MID_PRI = 0;
static const int8_t HI_PRI = SCHAR_MAX;
/// A convenience function for shared_ptr to legacy functors.
///
/// Sometimes you have a functor that is move-only, and therefore can't be
/// converted to a std::function (e.g. std::packaged_task). In that case,
/// wrap it in a shared_ptr (or maybe folly::MoveWrapper) and use this.
template <class P>
void addPtr(P fn) {
this->add([fn]() mutable { (*fn)(); });
}
};
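/// A minimal sketch of a conforming implementation and of addPtr(); the
/// InlineExecutor name and doWork() are illustrative only:
///
///   struct InlineExecutor : folly::Executor {
///     void add(folly::Func f) override { f(); }  // runs the task on the calling thread
///   };
///
///   InlineExecutor ex;
///   ex.add([] { doWork(); });
///   // Move-only functors can be wrapped in a shared_ptr and added via addPtr:
///   auto task = std::make_shared<std::packaged_task<void()>>([] { doWork(); });
///   ex.addPtr(task);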
} // folly

1384
ios/Pods/Folly/folly/Expected.h generated Normal file

File diff suppressed because it is too large

2814
ios/Pods/Folly/folly/FBString.h generated Normal file

File diff suppressed because it is too large

1667
ios/Pods/Folly/folly/FBVector.h generated Normal file

File diff suppressed because it is too large

137
ios/Pods/Folly/folly/File.h generated Normal file
View File

@ -0,0 +1,137 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <string>
#include <folly/Portability.h>
#include <folly/Range.h>
#include <folly/portability/Unistd.h>
namespace folly {
/**
* A File represents an open file.
*/
class File {
public:
/**
* Creates an empty File object, for late initialization.
*/
File();
/**
* Create a File object from an existing file descriptor.
* Takes ownership of the file descriptor if ownsFd is true.
*/
explicit File(int fd, bool ownsFd = false);
/**
* Open and create a file object. Throws on error.
*/
explicit File(const char* name, int flags = O_RDONLY, mode_t mode = 0666);
explicit File(
const std::string& name, int flags = O_RDONLY, mode_t mode = 0666);
explicit File(StringPiece name, int flags = O_RDONLY, mode_t mode = 0666);
~File();
/**
* Create and return a temporary, owned file (uses tmpfile()).
*/
static File temporary();
/**
* Return the file descriptor, or -1 if the file was closed.
*/
int fd() const { return fd_; }
/**
* Returns 'true' iff the file was successfully opened.
*/
explicit operator bool() const {
return fd_ != -1;
}
/**
* Duplicate file descriptor and return File that owns it.
*/
File dup() const;
/**
* If we own the file descriptor, close the file and throw on error.
* Otherwise, do nothing.
*/
void close();
/**
* Closes the file (if owned). Returns true on success, false (and sets
* errno) on error.
*/
bool closeNoThrow();
/**
* Returns and releases the file descriptor; no longer owned by this File.
* Returns -1 if the File object didn't wrap a file.
*/
int release() noexcept;
/**
* Swap this File with another.
*/
void swap(File& other);
// movable
File(File&&) noexcept;
File& operator=(File&&);
// FLOCK (INTERPROCESS) LOCKS
//
// NOTE THAT THESE LOCKS ARE flock() LOCKS. That is, they may only be used
// for inter-process synchronization -- an attempt to acquire a second lock
// on the same file descriptor from the same process may succeed. Attempting
// to acquire a second lock on a different file descriptor for the same file
// should fail, but some systems might implement flock() using fcntl() locks,
// in which case it will succeed.
void lock();
bool try_lock();
void unlock();
void lock_shared();
bool try_lock_shared();
void unlock_shared();
private:
void doLock(int op);
bool doTryLock(int op);
// unique
File(const File&) = delete;
File& operator=(const File&) = delete;
int fd_;
bool ownsFd_;
};
void swap(File& a, File& b);
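/**
 * A minimal usage sketch ("/tmp/example.txt" is an arbitrary illustrative path):
 *
 *   folly::File f("/tmp/example.txt", O_RDONLY);  // throws if the open fails
 *   char buf[128];
 *   ssize_t n = ::read(f.fd(), buf, sizeof(buf));
 *   // ... use the first n bytes of buf ...
 *   // f owns the descriptor, so its destructor closes it.
 */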
} // namespace folly

209
ios/Pods/Folly/folly/FileUtil.h generated Normal file
View File

@ -0,0 +1,209 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/Conv.h>
#include <folly/Portability.h>
#include <folly/ScopeGuard.h>
#include <folly/portability/Fcntl.h>
#include <folly/portability/SysUio.h>
#include <folly/portability/Unistd.h>
#include <cassert>
#include <limits>
#include <sys/stat.h>
#include <sys/types.h>
namespace folly {
/**
* Convenience wrappers around some commonly used system calls. The *NoInt
* wrappers retry on EINTR. The *Full wrappers retry on EINTR and also loop
* until all data is written. Note that *Full wrappers weaken the thread
* semantics of underlying system calls.
*/
int openNoInt(const char* name, int flags, mode_t mode = 0666);
int closeNoInt(int fd);
int dupNoInt(int fd);
int dup2NoInt(int oldfd, int newfd);
int fsyncNoInt(int fd);
int fdatasyncNoInt(int fd);
int ftruncateNoInt(int fd, off_t len);
int truncateNoInt(const char* path, off_t len);
int flockNoInt(int fd, int operation);
int shutdownNoInt(int fd, int how);
ssize_t readNoInt(int fd, void* buf, size_t n);
ssize_t preadNoInt(int fd, void* buf, size_t n, off_t offset);
ssize_t readvNoInt(int fd, const iovec* iov, int count);
ssize_t writeNoInt(int fd, const void* buf, size_t n);
ssize_t pwriteNoInt(int fd, const void* buf, size_t n, off_t offset);
ssize_t writevNoInt(int fd, const iovec* iov, int count);
/**
* Wrapper around read() (and pread()) that, in addition to retrying on
* EINTR, will loop until all data is read.
*
* This wrapper is only useful for blocking file descriptors (for non-blocking
* file descriptors, you have to be prepared to deal with incomplete reads
* anyway), and only exists because POSIX allows read() to return an incomplete
* read if interrupted by a signal (instead of returning -1 and setting errno
* to EINTR).
*
* Note that this wrapper weakens the thread safety of read(): the file pointer
* is shared between threads, but the system call is atomic. If multiple
* threads are reading from a file at the same time, you don't know where your
* data came from in the file, but you do know that the returned bytes were
* contiguous. You can no longer make this assumption if using readFull().
* You should probably use pread() when reading from the same file descriptor
* from multiple threads simultaneously, anyway.
*
* Note that readvFull and preadvFull require iov to be non-const, unlike
* readv and preadv. The contents of iov after these functions return
* is unspecified.
*/
ssize_t readFull(int fd, void* buf, size_t n);
ssize_t preadFull(int fd, void* buf, size_t n, off_t offset);
ssize_t readvFull(int fd, iovec* iov, int count);
ssize_t preadvFull(int fd, iovec* iov, int count, off_t offset);
/**
* Similar to readFull and preadFull above, wrappers around write() and
* pwrite() that loop until all data is written.
*
* Generally, the write() / pwrite() system call may always write fewer bytes
* than requested, just like read(). In certain cases (such as when writing to
* a pipe), POSIX provides stronger guarantees, but not in the general case.
* For example, Linux (even on a 64-bit platform) won't write more than 2GB in
* one write() system call.
*
* Note that writevFull and pwritevFull require iov to be non-const, unlike
* writev and pwritev. The contents of iov after these functions return
* is unspecified.
*/
ssize_t writeFull(int fd, const void* buf, size_t n);
ssize_t pwriteFull(int fd, const void* buf, size_t n, off_t offset);
ssize_t writevFull(int fd, iovec* iov, int count);
ssize_t pwritevFull(int fd, iovec* iov, int count, off_t offset);
/**
* Read entire file (if num_bytes is defaulted) or no more than
* num_bytes (otherwise) into container *out. The container is assumed
* to be contiguous, with element size equal to 1, and offer size(),
* reserve(), and random access (e.g. std::vector<char>, std::string,
* fbstring).
*
* Returns: true on success or false on failure. In the latter case
* errno will be set appropriately by the failing system primitive.
*/
template <class Container>
bool readFile(
int fd,
Container& out,
size_t num_bytes = std::numeric_limits<size_t>::max()) {
static_assert(sizeof(out[0]) == 1,
"readFile: only containers with byte-sized elements accepted");
size_t soFar = 0; // amount of bytes successfully read
SCOPE_EXIT {
DCHECK(out.size() >= soFar); // resize better doesn't throw
out.resize(soFar);
};
// Obtain file size:
struct stat buf;
if (fstat(fd, &buf) == -1) return false;
// Some files (notably under /proc and /sys on Linux) lie about
// their size, so treat the size advertised by fstat as advisory
// but don't rely on it. In particular, if the size is zero, we
// should attempt to read stuff. If not zero, we'll attempt to read
// one extra byte.
constexpr size_t initialAlloc = 1024 * 4;
out.resize(
std::min(
buf.st_size > 0 ? folly::to<size_t>(buf.st_size + 1) : initialAlloc,
num_bytes));
while (soFar < out.size()) {
const auto actual = readFull(fd, &out[soFar], out.size() - soFar);
if (actual == -1) {
return false;
}
soFar += actual;
if (soFar < out.size()) {
// File exhausted
break;
}
// Ew, allocate more memory. Use exponential growth to avoid
// quadratic behavior. Cap size to num_bytes.
out.resize(std::min(out.size() * 3 / 2, num_bytes));
}
return true;
}
/**
* Same as above, but takes in a file name instead of fd
*/
template <class Container>
bool readFile(
const char* file_name,
Container& out,
size_t num_bytes = std::numeric_limits<size_t>::max()) {
DCHECK(file_name);
const auto fd = openNoInt(file_name, O_RDONLY);
if (fd == -1) {
return false;
}
SCOPE_EXIT {
// Ignore errors when closing the file
closeNoInt(fd);
};
return readFile(fd, out, num_bytes);
}
/**
* Writes container to file. The container is assumed to be
* contiguous, with element size equal to 1, and offering STL-like
* methods empty(), size(), and indexed access
* (e.g. std::vector<char>, std::string, fbstring, StringPiece).
*
* "flags" dictates the open flags to use. Default is to create file
* if it doesn't exist and truncate it.
*
* Returns: true on success or false on failure. In the latter case
* errno will be set appropriately by the failing system primitive.
*/
template <class Container>
bool writeFile(const Container& data, const char* filename,
int flags = O_WRONLY | O_CREAT | O_TRUNC) {
static_assert(sizeof(data[0]) == 1,
"writeFile works with element size equal to 1");
int fd = open(filename, flags, 0666);
if (fd == -1) {
return false;
}
bool ok = data.empty() ||
writeFull(fd, &data[0], data.size()) == static_cast<ssize_t>(data.size());
return closeNoInt(fd) == 0 && ok;
}
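/**
 * A minimal usage sketch of readFile()/writeFile(); the path is arbitrary, and
 * errno is set by the failing primitive whenever either call returns false:
 *
 *   std::string contents;
 *   if (folly::readFile("/tmp/example.txt", contents)) {
 *     contents += "\n";
 *     if (!folly::writeFile(contents, "/tmp/example.txt")) {
 *       // handle the error; errno is set
 *     }
 *   }
 */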
} // namespaces

281
ios/Pods/Folly/folly/Fingerprint.h generated Normal file
View File

@ -0,0 +1,281 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Compute 64-, 96-, and 128-bit Rabin fingerprints, as described in
* Michael O. Rabin (1981)
* Fingerprinting by Random Polynomials
* Center for Research in Computing Technology, Harvard University
* Tech Report TR-CSE-03-01
*
* The implementation follows the optimization described in
* Andrei Z. Broder (1993)
* Some applications of Rabin's fingerprinting method
*
* extended for fingerprints larger than 64 bits, and modified to use
* 64-bit instead of 32-bit integers for computation.
*
* The precomputed tables are in FingerprintTable.cpp, which is automatically
* generated by ComputeFingerprintTable.cpp.
*
* Benchmarked on 10/13/2009 on a 2.5GHz quad-core Xeon L5420,
* - Fingerprint<64>::update64() takes about 12ns
* - Fingerprint<96>::update64() takes about 30ns
* - Fingerprint<128>::update128() takes about 30ns
* (unsurprisingly, Fingerprint<96> and Fingerprint<128> take the
* same amount of time, as they both use 128-bit operations; the least
* significant 32 bits of Fingerprint<96> will always be 0)
*
* @author Tudor Bosman (tudorb@facebook.com)
*/
#pragma once
#include <cstdint>
#include <folly/Range.h>
namespace folly {
namespace detail {
template <int BITS>
struct FingerprintTable {
static const uint64_t poly[1 + (BITS - 1) / 64];
static const uint64_t table[8][256][1 + (BITS - 1) / 64];
};
template <int BITS>
const uint64_t FingerprintTable<BITS>::poly[1 + (BITS - 1) / 64] = {};
template <int BITS>
const uint64_t FingerprintTable<BITS>::table[8][256][1 + (BITS - 1) / 64] = {};
#define FOLLY_DECLARE_FINGERPRINT_TABLES(BITS) \
template <> \
const uint64_t FingerprintTable<BITS>::poly[1 + (BITS - 1) / 64]; \
template <> \
const uint64_t FingerprintTable<BITS>::table[8][256][1 + (BITS - 1) / 64]
FOLLY_DECLARE_FINGERPRINT_TABLES(64);
FOLLY_DECLARE_FINGERPRINT_TABLES(96);
FOLLY_DECLARE_FINGERPRINT_TABLES(128);
#undef FOLLY_DECLARE_FINGERPRINT_TABLES
} // namespace detail
/**
* Compute the Rabin fingerprint.
*
* TODO(tudorb): Extend this to allow removing values from the computed
* fingerprint (so we can fingerprint a sliding window, as in the Rabin-Karp
* string matching algorithm)
*
* update* methods return *this, so you can chain them together:
* Fingerprint<96>().update8(x).update(str).update64(val).write(output);
*/
template <int BITS>
class Fingerprint {
public:
Fingerprint() {
// Use a non-zero starting value. We'll use (1 << (BITS-1))
fp_[0] = 1ULL << 63;
for (int i = 1; i < size(); i++)
fp_[i] = 0;
}
Fingerprint& update8(uint8_t v) {
uint8_t out = shlor8(v);
xortab(detail::FingerprintTable<BITS>::table[0][out]);
return *this;
}
// update32 and update64 are convenience functions to update the fingerprint
// with 4 and 8 bytes at a time. They are faster than calling update8
// in a loop. They process the bytes in big-endian order.
Fingerprint& update32(uint32_t v) {
uint32_t out = shlor32(v);
for (int i = 0; i < 4; i++) {
xortab(detail::FingerprintTable<BITS>::table[i][out&0xff]);
out >>= 8;
}
return *this;
}
Fingerprint& update64(uint64_t v) {
uint64_t out = shlor64(v);
for (int i = 0; i < 8; i++) {
xortab(detail::FingerprintTable<BITS>::table[i][out&0xff]);
out >>= 8;
}
return *this;
}
Fingerprint& update(StringPiece str) {
// TODO(tudorb): We could be smart and do update64 or update32 if aligned
for (auto c : str) {
update8(uint8_t(c));
}
return *this;
}
/**
* Return the number of uint64s needed to hold the fingerprint value.
*/
static int size() {
return 1 + (BITS-1)/64;
}
/**
* Write the computed fingerprint to an array of size() uint64_t's.
* For Fingerprint<64>, size()==1; we write 64 bits in out[0]
* For Fingerprint<96>, size()==2; we write 64 bits in out[0] and
* the most significant 32 bits of out[1]
* For Fingerprint<128>, size()==2; we write 64 bits in out[0] and
* 64 bits in out[1].
*/
void write(uint64_t* out) const {
for (int i = 0; i < size(); i++) {
out[i] = fp_[i];
}
}
private:
// XOR the fingerprint with a value from one of the tables.
void xortab(const uint64_t* tab) {
for (int i = 0; i < size(); i++) {
fp_[i] ^= tab[i];
}
}
// Helper functions: shift the fingerprint value left by 8/32/64 bits,
// return the "out" value (the bits that were shifted out), and add "v"
// in the bits on the right.
uint8_t shlor8(uint8_t v);
uint32_t shlor32(uint32_t v);
uint64_t shlor64(uint64_t v);
uint64_t fp_[1 + (BITS-1)/64];
};
// Convenience functions
/**
* Return the 64-bit Rabin fingerprint of a string.
*/
inline uint64_t fingerprint64(StringPiece str) {
uint64_t fp;
Fingerprint<64>().update(str).write(&fp);
return fp;
}
/**
* Compute the 96-bit Rabin fingerprint of a string.
* Return the 64 most significant bits in *msb, and the 32 least significant
* bits in *lsb.
*/
inline void fingerprint96(StringPiece str,
uint64_t* msb, uint32_t* lsb) {
uint64_t fp[2];
Fingerprint<96>().update(str).write(fp);
*msb = fp[0];
*lsb = (uint32_t)(fp[1] >> 32);
}
/**
* Compute the 128-bit Rabin fingerprint of a string.
* Return the 64 most significant bits in *msb, and the 64 least significant
* bits in *lsb.
*/
inline void fingerprint128(StringPiece str,
uint64_t* msb, uint64_t* lsb) {
uint64_t fp[2];
Fingerprint<128>().update(str).write(fp);
*msb = fp[0];
*lsb = fp[1];
}
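// A minimal usage sketch of the convenience function above and of the chained
// update*() form described in the class comment (all values are illustrative):
//
//   uint64_t h = folly::fingerprint64("hello world");
//
//   uint64_t out[2];
//   folly::Fingerprint<96>()
//       .update8('x')
//       .update(folly::StringPiece("hello"))
//       .update64(12345)
//       .write(out);  // out[0] = high 64 bits; top 32 bits of out[1] are used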
template <>
inline uint8_t Fingerprint<64>::shlor8(uint8_t v) {
uint8_t out = (uint8_t)(fp_[0] >> 56);
fp_[0] = (fp_[0] << 8) | ((uint64_t)v);
return out;
}
template <>
inline uint32_t Fingerprint<64>::shlor32(uint32_t v) {
uint32_t out = (uint32_t)(fp_[0] >> 32);
fp_[0] = (fp_[0] << 32) | ((uint64_t)v);
return out;
}
template <>
inline uint64_t Fingerprint<64>::shlor64(uint64_t v) {
uint64_t out = fp_[0];
fp_[0] = v;
return out;
}
template <>
inline uint8_t Fingerprint<96>::shlor8(uint8_t v) {
uint8_t out = (uint8_t)(fp_[0] >> 56);
fp_[0] = (fp_[0] << 8) | (fp_[1] >> 56);
fp_[1] = (fp_[1] << 8) | ((uint64_t)v << 32);
return out;
}
template <>
inline uint32_t Fingerprint<96>::shlor32(uint32_t v) {
uint32_t out = (uint32_t)(fp_[0] >> 32);
fp_[0] = (fp_[0] << 32) | (fp_[1] >> 32);
fp_[1] = ((uint64_t)v << 32);
return out;
}
template <>
inline uint64_t Fingerprint<96>::shlor64(uint64_t v) {
uint64_t out = fp_[0];
fp_[0] = fp_[1] | (v >> 32);
fp_[1] = v << 32;
return out;
}
template <>
inline uint8_t Fingerprint<128>::shlor8(uint8_t v) {
uint8_t out = (uint8_t)(fp_[0] >> 56);
fp_[0] = (fp_[0] << 8) | (fp_[1] >> 56);
fp_[1] = (fp_[1] << 8) | ((uint64_t)v);
return out;
}
template <>
inline uint32_t Fingerprint<128>::shlor32(uint32_t v) {
uint32_t out = (uint32_t)(fp_[0] >> 32);
fp_[0] = (fp_[0] << 32) | (fp_[1] >> 32);
fp_[1] = (fp_[1] << 32) | ((uint64_t)v);
return out;
}
template <>
inline uint64_t Fingerprint<128>::shlor64(uint64_t v) {
uint64_t out = fp_[0];
fp_[0] = fp_[1];
fp_[1] = v;
return out;
}
} // namespace folly

235
ios/Pods/Folly/folly/Foreach.h generated Normal file
View File

@ -0,0 +1,235 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
/*
* Interim macros (until we have C++0x range-based for) that simplify
* writing loops of the form
*
* for (Container<data>::iterator i = c.begin(); i != c.end(); ++i) statement
*
* Just replace the above with:
*
* FOR_EACH (i, c) statement
*
* and everything is taken care of.
*
* The implementation is a bit convoluted to make sure the container is
* evaluated only once (however, keep in mind that c.end() is evaluated
* at every pass through the loop). To ensure the container is not
* evaluated multiple times, the macro defines one do-nothing if
* statement to inject the Boolean variable FOR_EACH_state1, and then a
* for statement that is executed only once, which defines the variable
* FOR_EACH_state2 holding an rvalue reference to the container being
* iterated. The workhorse is the last loop, which uses the just-defined
* rvalue reference FOR_EACH_state2.
*
* The state variables are nested so they don't interfere; you can use
* FOR_EACH multiple times in the same scope, either at the same level or
* nested.
*
* In optimized builds g++ eliminates the extra gymnastics entirely and
* generates code 100% identical to the handwritten loop.
*/
#include <type_traits>
#include <folly/Preprocessor.h>
/*
* Form a local variable name from "FOR_EACH_" x __LINE__, so that
* FOR_EACH can be nested without creating shadowed declarations.
*/
#define _FE_ANON(x) FB_CONCATENATE(FOR_EACH_, FB_CONCATENATE(x, __LINE__))
/*
* Shorthand for:
* for (auto i = c.begin(); i != c.end(); ++i)
* except that c is evaluated only once.
*/
#define FOR_EACH(i, c) \
if (bool _FE_ANON(s1_) = false) {} else \
for (auto && _FE_ANON(s2_) = (c); \
!_FE_ANON(s1_); _FE_ANON(s1_) = true) \
for (auto i = _FE_ANON(s2_).begin(); \
i != _FE_ANON(s2_).end(); ++i)
/*
* Similar to FOR_EACH, but iterates the container backwards by
* using rbegin() and rend().
*/
#define FOR_EACH_R(i, c) \
if (bool FOR_EACH_R_state1 = false) {} else \
for (auto && FOR_EACH_R_state2 = (c); \
!FOR_EACH_R_state1; FOR_EACH_R_state1 = true) \
for (auto i = FOR_EACH_R_state2.rbegin(); \
i != FOR_EACH_R_state2.rend(); ++i)
/*
* Similar to FOR_EACH but also allows client to specify a 'count' variable
* to track the current iteration in the loop (starting at zero).
* Similar to python's enumerate() function. For example:
* string commaSeparatedValues = "VALUES: ";
* FOR_EACH_ENUMERATE(ii, value, columns) { // don't want comma at the end!
* commaSeparatedValues += (ii == 0) ? *value : string(",") + *value;
* }
*/
#define FOR_EACH_ENUMERATE(count, i, c) \
if (bool FOR_EACH_state1 = false) {} else \
for (auto && FOR_EACH_state2 = (c); \
!FOR_EACH_state1; FOR_EACH_state1 = true) \
if (size_t FOR_EACH_privateCount = 0) {} else \
if (const size_t& count = FOR_EACH_privateCount) {} else \
for (auto i = FOR_EACH_state2.begin(); \
i != FOR_EACH_state2.end(); ++FOR_EACH_privateCount, ++i)
/**
* Similar to FOR_EACH, but gives the user the key and value for each entry in
* the container, instead of just the iterator to the entry. For example:
* map<string, string> testMap;
* FOR_EACH_KV(key, value, testMap) {
* cout << key << " " << value;
* }
*/
#define FOR_EACH_KV(k, v, c) \
if (unsigned int FOR_EACH_state1 = 0) {} else \
for (auto && FOR_EACH_state2 = (c); \
!FOR_EACH_state1; FOR_EACH_state1 = 1) \
for (auto FOR_EACH_state3 = FOR_EACH_state2.begin(); \
FOR_EACH_state3 != FOR_EACH_state2.end(); \
FOR_EACH_state1 == 2 \
? ((FOR_EACH_state1 = 0), ++FOR_EACH_state3) \
: (FOR_EACH_state3 = FOR_EACH_state2.end())) \
for (auto &k = FOR_EACH_state3->first; \
!FOR_EACH_state1; ++FOR_EACH_state1) \
for (auto &v = FOR_EACH_state3->second; \
!FOR_EACH_state1; ++FOR_EACH_state1)
namespace folly { namespace detail {
// Boost 1.48 lacks has_less, we emulate a subset of it here.
template <typename T, typename U>
class HasLess {
struct BiggerThanChar { char unused[2]; };
template <typename C, typename D> static char test(decltype(C() < D())*);
template <typename, typename> static BiggerThanChar test(...);
public:
enum { value = sizeof(test<T, U>(0)) == 1 };
};
/**
* notThereYet helps the FOR_EACH_RANGE macro by opportunistically
* using "<" instead of "!=" whenever available when checking for loop
* termination. This makes e.g. examples such as FOR_EACH_RANGE (i,
* 10, 5) execute zero iterations instead of looping virtually
* forever. At the same time, some iterator types define "!=" but not
* "<". The notThereYet function will dispatch differently for those.
*
* Below is the correct implementation of notThereYet. It is disabled
* because of a bug in Boost 1.46: The filesystem::path::iterator
* defines operator< (via boost::iterator_facade), but that in turn
* uses distance_to which is undefined for that particular
* iterator. So HasLess (defined above) identifies
* boost::filesystem::path as properly comparable with <, but in fact
* attempting to do so will yield a compile-time error.
*
* The else branch (active) contains a conservative
* implementation.
*/
#if 0
template <class T, class U>
typename std::enable_if<HasLess<T, U>::value, bool>::type
notThereYet(T& iter, const U& end) {
return iter < end;
}
template <class T, class U>
typename std::enable_if<!HasLess<T, U>::value, bool>::type
notThereYet(T& iter, const U& end) {
return iter != end;
}
#else
template <class T, class U>
typename std::enable_if<
(std::is_arithmetic<T>::value && std::is_arithmetic<U>::value) ||
(std::is_pointer<T>::value && std::is_pointer<U>::value),
bool>::type
notThereYet(T& iter, const U& end) {
return iter < end;
}
template <class T, class U>
typename std::enable_if<
!(
(std::is_arithmetic<T>::value && std::is_arithmetic<U>::value) ||
(std::is_pointer<T>::value && std::is_pointer<U>::value)
),
bool>::type
notThereYet(T& iter, const U& end) {
return iter != end;
}
#endif
/**
* downTo is similar to notThereYet, but in reverse - it helps the
* FOR_EACH_RANGE_R macro.
*/
template <class T, class U>
typename std::enable_if<HasLess<U, T>::value, bool>::type
downTo(T& iter, const U& begin) {
return begin < iter--;
}
template <class T, class U>
typename std::enable_if<!HasLess<U, T>::value, bool>::type
downTo(T& iter, const U& begin) {
if (iter == begin) return false;
--iter;
return true;
}
} }
/*
* Iteration with given limits. end is assumed to be reachable from
* begin. end is evaluated every pass through the loop.
*
* NOTE: The type of the loop variable should be the common type of "begin"
* and "end". e.g. If "begin" is "int" but "end" is "long", we want "i"
* to be "long". This is done by getting the type of (true ? begin : end)
*/
#define FOR_EACH_RANGE(i, begin, end) \
for (auto i = (true ? (begin) : (end)); \
::folly::detail::notThereYet(i, (end)); \
++i)
/*
* Iteration with given limits. begin is assumed to be reachable from
* end by successive decrements. begin is evaluated every pass through
* the loop.
*
* NOTE: The type of the loop variable should be the common type of "begin"
* and "end". e.g. If "begin" is "int" but "end" is "long", we want "i"
* to be "long". This is done by getting the type of (false ? begin : end)
*/
#define FOR_EACH_RANGE_R(i, begin, end) \
for (auto i = (false ? (begin) : (end)); ::folly::detail::downTo(i, (begin));)
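/*
 * A minimal usage sketch of the two range macros (the bounds are illustrative):
 *
 *   FOR_EACH_RANGE (i, 0, 5) {
 *     std::cout << i << " ";   // prints 0 1 2 3 4
 *   }
 *
 *   FOR_EACH_RANGE_R (i, 0, 5) {
 *     std::cout << i << " ";   // prints 4 3 2 1 0
 *   }
 */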

1098
ios/Pods/Folly/folly/Format-inl.h generated Normal file

File diff suppressed because it is too large

435
ios/Pods/Folly/folly/Format.h generated Normal file
View File

@ -0,0 +1,435 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#define FOLLY_FORMAT_H_
#include <cstdio>
#include <tuple>
#include <type_traits>
#include <folly/Conv.h>
#include <folly/Range.h>
#include <folly/Traits.h>
#include <folly/String.h>
#include <folly/FormatArg.h>
// Ignore shadowing warnings within this file, so includers can use -Wshadow.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
namespace folly {
// forward declarations
template <bool containerMode, class... Args> class Formatter;
template <class... Args>
Formatter<false, Args...> format(StringPiece fmt, Args&&... args);
template <class C>
Formatter<true, C> vformat(StringPiece fmt, C&& container);
template <class T, class Enable=void> class FormatValue;
// meta-attribute to identify formatters in this sea of template weirdness
namespace detail {
class FormatterTag {};
};
/**
* Formatter class.
*
* Note that this class is tricky, as it keeps *references* to its arguments
* (and doesn't copy the passed-in format string). Thankfully, you can't use
* this directly, you have to use format(...) below.
*/
/* BaseFormatter class. Currently, the only behavior that can be
* overridden is the actual formatting of positional parameters in
* `doFormatArg`. The Formatter class provides the default implementation.
*/
template <class Derived, bool containerMode, class... Args>
class BaseFormatter {
public:
/**
* Append to output. out(StringPiece sp) may be called (more than once)
*/
template <class Output>
void operator()(Output& out) const;
/**
* Append to a string.
*/
template <class Str>
typename std::enable_if<IsSomeString<Str>::value>::type
appendTo(Str& str) const {
auto appender = [&str] (StringPiece s) { str.append(s.data(), s.size()); };
(*this)(appender);
}
/**
* Conversion to string
*/
std::string str() const {
std::string s;
appendTo(s);
return s;
}
/**
* Conversion to fbstring
*/
fbstring fbstr() const {
fbstring s;
appendTo(s);
return s;
}
/**
* metadata to identify generated children of BaseFormatter
*/
typedef detail::FormatterTag IsFormatter;
typedef BaseFormatter BaseType;
private:
typedef std::tuple<FormatValue<
typename std::decay<Args>::type>...> ValueTuple;
static constexpr size_t valueCount = std::tuple_size<ValueTuple>::value;
template <size_t K, class Callback>
typename std::enable_if<K == valueCount>::type
doFormatFrom(size_t i, FormatArg& arg, Callback& /*cb*/) const {
arg.error("argument index out of range, max=", i);
}
template <size_t K, class Callback>
typename std::enable_if<(K < valueCount)>::type
doFormatFrom(size_t i, FormatArg& arg, Callback& cb) const {
if (i == K) {
static_cast<const Derived*>(this)->template doFormatArg<K>(arg, cb);
} else {
doFormatFrom<K+1>(i, arg, cb);
}
}
template <class Callback>
void doFormat(size_t i, FormatArg& arg, Callback& cb) const {
return doFormatFrom<0>(i, arg, cb);
}
template <size_t K>
typename std::enable_if<K == valueCount, int>::type
getSizeArgFrom(size_t i, const FormatArg& arg) const {
arg.error("argument index out of range, max=", i);
}
template <class T>
typename std::enable_if<std::is_integral<T>::value &&
!std::is_same<T, bool>::value, int>::type
getValue(const FormatValue<T>& format, const FormatArg&) const {
return static_cast<int>(format.getValue());
}
template <class T>
typename std::enable_if<!std::is_integral<T>::value ||
std::is_same<T, bool>::value, int>::type
getValue(const FormatValue<T>&, const FormatArg& arg) const {
arg.error("dynamic field width argument must be integral");
}
template <size_t K>
typename std::enable_if<K < valueCount, int>::type
getSizeArgFrom(size_t i, const FormatArg& arg) const {
if (i == K) {
return getValue(std::get<K>(values_), arg);
}
return getSizeArgFrom<K+1>(i, arg);
}
int getSizeArg(size_t i, const FormatArg& arg) const {
return getSizeArgFrom<0>(i, arg);
}
StringPiece str_;
protected:
explicit BaseFormatter(StringPiece str, Args&&... args);
// Not copyable
BaseFormatter(const BaseFormatter&) = delete;
BaseFormatter& operator=(const BaseFormatter&) = delete;
// Movable, but the move constructor and assignment operator are private,
// for the exclusive use of format() (below). This way, you can't create
// a Formatter object, but can handle references to it (for streaming,
// conversion to string, etc) -- which is good, as Formatter objects are
// dangerous (they hold references, possibly to temporaries)
BaseFormatter(BaseFormatter&&) = default;
BaseFormatter& operator=(BaseFormatter&&) = default;
ValueTuple values_;
};
template <bool containerMode, class... Args>
class Formatter : public BaseFormatter<Formatter<containerMode, Args...>,
containerMode,
Args...> {
private:
explicit Formatter(StringPiece& str, Args&&... args)
: BaseFormatter<Formatter<containerMode, Args...>,
containerMode,
Args...>(str, std::forward<Args>(args)...) {}
template <size_t K, class Callback>
void doFormatArg(FormatArg& arg, Callback& cb) const {
std::get<K>(this->values_).format(arg, cb);
}
friend class BaseFormatter<Formatter<containerMode, Args...>,
containerMode,
Args...>;
template <class... A>
friend Formatter<false, A...> format(StringPiece fmt, A&&... arg);
template <class C>
friend Formatter<true, C> vformat(StringPiece fmt, C&& container);
};
/**
* Formatter objects can be written to streams.
*/
template<bool containerMode, class... Args>
std::ostream& operator<<(std::ostream& out,
const Formatter<containerMode, Args...>& formatter) {
auto writer = [&out] (StringPiece sp) { out.write(sp.data(), sp.size()); };
formatter(writer);
return out;
}
/**
* Formatter objects can be written to stdio FILEs.
*/
template <class Derived, bool containerMode, class... Args>
void writeTo(FILE* fp,
const BaseFormatter<Derived, containerMode, Args...>& formatter);
/**
* Create a formatter object.
*
* std::string formatted = format("{} {}", 23, 42).str();
* LOG(INFO) << format("{} {}", 23, 42);
* writeTo(stdout, format("{} {}", 23, 42));
*/
template <class... Args>
Formatter<false, Args...> format(StringPiece fmt, Args&&... args) {
return Formatter<false, Args...>(
fmt, std::forward<Args>(args)...);
}
/**
* Like format(), but immediately returns the formatted string instead of an
* intermediate format object.
*/
template <class... Args>
inline std::string sformat(StringPiece fmt, Args&&... args) {
return format(fmt, std::forward<Args>(args)...).str();
}
/**
* Create a formatter object that takes one argument (of container type)
* and uses that container to get argument values from.
*
* std::map<string, string> map { {"hello", "world"}, {"answer", "42"} };
*
* The following are equivalent:
* format("{0[hello]} {0[answer]}", map);
*
* vformat("{hello} {answer}", map);
*
* but the latter is cleaner.
*/
template <class Container>
Formatter<true, Container> vformat(StringPiece fmt, Container&& container) {
return Formatter<true, Container>(
fmt, std::forward<Container>(container));
}
/**
* Like vformat(), but immediately returns the formatted string instead of an
* intermediate format object.
*/
template <class Container>
inline std::string svformat(StringPiece fmt, Container&& container) {
return vformat(fmt, std::forward<Container>(container)).str();
}
/**
* Wrap a sequence or associative container so that out-of-range lookups
* return a default value rather than throwing an exception.
*
* Usage:
* format("[no_such_key"], defaulted(map, 42)) -> 42
*/
namespace detail {
template <class Container, class Value> struct DefaultValueWrapper {
DefaultValueWrapper(const Container& container, const Value& defaultValue)
: container(container),
defaultValue(defaultValue) {
}
const Container& container;
const Value& defaultValue;
};
} // namespace
template <class Container, class Value>
detail::DefaultValueWrapper<Container, Value>
defaulted(const Container& c, const Value& v) {
return detail::DefaultValueWrapper<Container, Value>(c, v);
}
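/**
 * A minimal usage sketch of defaulted(); the map contents are illustrative:
 *
 *   std::map<std::string, std::string> m{{"hello", "world"}};
 *   std::string dflt = "42";
 *   // Without defaulted(), "{0[answer]}" would throw because the key is
 *   // missing; with it, the default value is rendered instead.
 *   std::string s = folly::sformat("{0[answer]}", folly::defaulted(m, dflt));
 *   // s == "42"
 */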
/**
* Append formatted output to a string.
*
* std::string foo;
* format(&foo, "{} {}", 42, 23);
*
* Shortcut for toAppend(format(...), &foo);
*/
template <class Str, class... Args>
typename std::enable_if<IsSomeString<Str>::value>::type
format(Str* out, StringPiece fmt, Args&&... args) {
format(fmt, std::forward<Args>(args)...).appendTo(*out);
}
/**
* Append vformatted output to a string.
*/
template <class Str, class Container>
typename std::enable_if<IsSomeString<Str>::value>::type
vformat(Str* out, StringPiece fmt, Container&& container) {
vformat(fmt, std::forward<Container>(container)).appendTo(*out);
}
/**
* Utilities for all format value specializations.
*/
namespace format_value {
/**
* Format a string in "val", obeying appropriate alignment, padding, width,
* and precision. Treats Align::DEFAULT as Align::LEFT, and
* Align::PAD_AFTER_SIGN as Align::RIGHT; use formatNumber for
* number-specific formatting.
*/
template <class FormatCallback>
void formatString(StringPiece val, FormatArg& arg, FormatCallback& cb);
/**
* Format a number in "val"; the first prefixLen characters form the prefix
* (sign, "0x" base prefix, etc) which must be left-aligned if the alignment
* is Align::PAD_AFTER_SIGN. Treats Align::DEFAULT as Align::LEFT. Ignores
* arg.precision, as that has a different meaning for numbers (not "maximum
* field width")
*/
template <class FormatCallback>
void formatNumber(StringPiece val, int prefixLen, FormatArg& arg,
FormatCallback& cb);
/**
* Format a Formatter object recursively. Behaves just like
* formatString(fmt.str(), arg, cb); but avoids creating a temporary
* string if possible.
*/
template <class FormatCallback,
class Derived,
bool containerMode,
class... Args>
void formatFormatter(
const BaseFormatter<Derived, containerMode, Args...>& formatter,
FormatArg& arg,
FormatCallback& cb);
} // namespace format_value
/*
* Specialize folly::FormatValue for your type.
*
* FormatValue<T> is constructed with a (reference-collapsed) T&&, which is
* guaranteed to stay alive until the FormatValue object is destroyed, so you
* may keep a reference (or pointer) to it instead of making a copy.
*
* You must define
* template <class Callback>
* void format(FormatArg& arg, Callback& cb) const;
* with the following semantics: format the value using the given argument.
*
* arg is given by non-const reference for convenience -- it won't be reused,
* so feel free to modify it in place if necessary. (For example, wrap an
* existing conversion but change the default, or remove the "key" when
* extracting an element from a container)
*
* Call the callback to append data to the output. You may call the callback
* as many times as you'd like (or not at all, if you want to output an
* empty string)
*/
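// A minimal sketch of such a specialization for a hypothetical user type
// Point; all names below are illustrative:
//
//   struct Point { int x, y; };
//
//   namespace folly {
//   template <>
//   class FormatValue<Point> {
//    public:
//     explicit FormatValue(const Point& p) : p_(p) {}
//     template <class Callback>
//     void format(FormatArg& arg, Callback& cb) const {
//       format_value::formatString(
//           folly::to<std::string>(p_.x, ",", p_.y), arg, cb);
//     }
//    private:
//     const Point& p_;
//   };
//   } // namespace folly
//
//   // folly::sformat("{}", Point{1, 2}) == "1,2"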
namespace detail {
template <class T, class Enable = void>
struct IsFormatter : public std::false_type {};
template <class T>
struct IsFormatter<
T,
typename std::enable_if<
std::is_same<typename T::IsFormatter, detail::FormatterTag>::value>::
type> : public std::true_type {};
} // folly::detail
// Deprecated API. formatChecked() et. al. now behave identically to their
// non-Checked counterparts.
template <class... Args>
Formatter<false, Args...> formatChecked(StringPiece fmt, Args&&... args) {
return format(fmt, std::forward<Args>(args)...);
}
template <class... Args>
inline std::string sformatChecked(StringPiece fmt, Args&&... args) {
return formatChecked(fmt, std::forward<Args>(args)...).str();
}
template <class Container>
Formatter<true, Container> vformatChecked(StringPiece fmt,
Container&& container) {
return vformat(fmt, std::forward<Container>(container));
}
template <class Container>
inline std::string svformatChecked(StringPiece fmt, Container&& container) {
return vformatChecked(fmt, std::forward<Container>(container)).str();
}
template <class Str, class... Args>
typename std::enable_if<IsSomeString<Str>::value>::type
formatChecked(Str* out, StringPiece fmt, Args&&... args) {
formatChecked(fmt, std::forward<Args>(args)...).appendTo(*out);
}
template <class Str, class Container>
typename std::enable_if<IsSomeString<Str>::value>::type
vformatChecked(Str* out, StringPiece fmt, Container&& container) {
vformatChecked(fmt, std::forward<Container>(container)).appendTo(*out);
}
} // namespace folly
#include <folly/Format-inl.h>
#pragma GCC diagnostic pop

276
ios/Pods/Folly/folly/FormatArg.h generated Normal file
View File

@ -0,0 +1,276 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <stdexcept>
#include <folly/Conv.h>
#include <folly/Likely.h>
#include <folly/Portability.h>
#include <folly/Range.h>
namespace folly {
class BadFormatArg : public std::invalid_argument {
public:
explicit BadFormatArg(const std::string& msg)
: std::invalid_argument(msg) {}
};
/**
* Parsed format argument.
*/
struct FormatArg {
/**
* Parse a format argument from a string. Keeps a reference to the
* passed-in string -- does not copy the given characters.
*/
explicit FormatArg(StringPiece sp)
: fullArgString(sp),
fill(kDefaultFill),
align(Align::DEFAULT),
sign(Sign::DEFAULT),
basePrefix(false),
thousandsSeparator(false),
trailingDot(false),
width(kDefaultWidth),
widthIndex(kNoIndex),
precision(kDefaultPrecision),
presentation(kDefaultPresentation),
nextKeyMode_(NextKeyMode::NONE) {
if (!sp.empty()) {
initSlow();
}
}
enum class Type {
INTEGER,
FLOAT,
OTHER
};
/**
* Validate the argument for the given type; throws on error.
*/
void validate(Type type) const;
/**
* Throw an exception if the first argument is false. The exception
* message will contain the argument string as well as any passed-in
* arguments to enforce, formatted using folly::to<std::string>.
*/
template <typename... Args>
void enforce(bool v, Args&&... args) const {
if (UNLIKELY(!v)) {
error(std::forward<Args>(args)...);
}
}
template <typename... Args>
std::string errorStr(Args&&... args) const;
template <typename... Args>
[[noreturn]] void error(Args&&... args) const;
/**
* Full argument string, as passed in to the constructor.
*/
StringPiece fullArgString;
/**
* Fill
*/
static constexpr char kDefaultFill = '\0';
char fill;
/**
* Alignment
*/
enum class Align : uint8_t {
DEFAULT,
LEFT,
RIGHT,
PAD_AFTER_SIGN,
CENTER,
INVALID
};
Align align;
/**
* Sign
*/
enum class Sign : uint8_t {
DEFAULT,
PLUS_OR_MINUS,
MINUS,
SPACE_OR_MINUS,
INVALID
};
Sign sign;
/**
* Output base prefix (0 for octal, 0x for hex)
*/
bool basePrefix;
/**
* Output thousands separator (comma)
*/
bool thousandsSeparator;
/**
* Force a trailing decimal on doubles which could be rendered as ints
*/
bool trailingDot;
/**
* Field width and optional argument index
*/
static constexpr int kDefaultWidth = -1;
static constexpr int kDynamicWidth = -2;
static constexpr int kNoIndex = -1;
int width;
int widthIndex;
/**
* Precision
*/
static constexpr int kDefaultPrecision = -1;
int precision;
/**
* Presentation
*/
static constexpr char kDefaultPresentation = '\0';
char presentation;
/**
* Split a key component from "key", which must be non-empty (an exception
* is thrown otherwise).
*/
template <bool emptyOk=false>
StringPiece splitKey();
/**
* Is the entire key empty?
*/
bool keyEmpty() const {
return nextKeyMode_ == NextKeyMode::NONE && key_.empty();
}
/**
* Split an integer key component from "key", which must be non-empty and a valid
* integer (an exception is thrown otherwise).
*/
int splitIntKey();
void setNextIntKey(int val) {
assert(nextKeyMode_ == NextKeyMode::NONE);
nextKeyMode_ = NextKeyMode::INT;
nextIntKey_ = val;
}
void setNextKey(StringPiece val) {
assert(nextKeyMode_ == NextKeyMode::NONE);
nextKeyMode_ = NextKeyMode::STRING;
nextKey_ = val;
}
private:
void initSlow();
template <bool emptyOk>
StringPiece doSplitKey();
StringPiece key_;
int nextIntKey_;
StringPiece nextKey_;
enum class NextKeyMode {
NONE,
INT,
STRING,
};
NextKeyMode nextKeyMode_;
};
template <typename... Args>
inline std::string FormatArg::errorStr(Args&&... args) const {
return to<std::string>(
"invalid format argument {", fullArgString, "}: ",
std::forward<Args>(args)...);
}
template <typename... Args>
[[noreturn]] inline void FormatArg::error(Args&&... args) const {
throw BadFormatArg(errorStr(std::forward<Args>(args)...));
}
template <bool emptyOk>
inline StringPiece FormatArg::splitKey() {
enforce(nextKeyMode_ != NextKeyMode::INT, "integer key expected");
return doSplitKey<emptyOk>();
}
template <bool emptyOk>
inline StringPiece FormatArg::doSplitKey() {
if (nextKeyMode_ == NextKeyMode::STRING) {
nextKeyMode_ = NextKeyMode::NONE;
if (!emptyOk) { // static
enforce(!nextKey_.empty(), "non-empty key required");
}
return nextKey_;
}
if (key_.empty()) {
if (!emptyOk) { // static
error("non-empty key required");
}
return StringPiece();
}
const char* b = key_.begin();
const char* e = key_.end();
const char* p;
if (e[-1] == ']') {
--e;
p = static_cast<const char*>(memchr(b, '[', e - b));
enforce(p, "unmatched ']'");
} else {
p = static_cast<const char*>(memchr(b, '.', e - b));
}
if (p) {
key_.assign(p + 1, e);
} else {
p = e;
key_.clear();
}
if (!emptyOk) { // static
enforce(b != p, "non-empty key required");
}
return StringPiece(b, p);
}
inline int FormatArg::splitIntKey() {
if (nextKeyMode_ == NextKeyMode::INT) {
nextKeyMode_ = NextKeyMode::NONE;
return nextIntKey_;
}
try {
return to<int>(doSplitKey<true>());
} catch (const std::out_of_range& e) {
error("integer key required");
return 0; // unreached
}
}
} // namespace folly

63
ios/Pods/Folly/folly/FormatTraits.h generated Normal file
View File

@ -0,0 +1,63 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <type_traits>
namespace folly { namespace detail {
// Shortcut, so we don't have to use enable_if everywhere
struct FormatTraitsBase {
typedef void enabled;
};
// Traits that define enabled, value_type, and at() for anything
// indexable with integral keys: pointers, arrays, vectors, and maps
// with integral keys
template <class T, class Enable = void> struct IndexableTraits;
// Base class for sequences (vectors, deques)
template <class C>
struct IndexableTraitsSeq : public FormatTraitsBase {
typedef C container_type;
typedef typename C::value_type value_type;
static const value_type& at(const C& c, int idx) {
return c.at(idx);
}
static const value_type& at(const C& c, int idx, const value_type& dflt) {
return (idx >= 0 && size_t(idx) < c.size()) ? c.at(idx) : dflt;
}
};
// Base class for associative types (maps)
template <class C>
struct IndexableTraitsAssoc : public FormatTraitsBase {
typedef typename C::value_type::second_type value_type;
static const value_type& at(const C& c, int idx) {
return c.at(static_cast<typename C::key_type>(idx));
}
static const value_type& at(const C& c, int idx, const value_type& dflt) {
auto pos = c.find(static_cast<typename C::key_type>(idx));
return pos != c.end() ? pos->second : dflt;
}
};
}} // namespaces

785
ios/Pods/Folly/folly/Function.h generated Normal file
View File

@ -0,0 +1,785 @@
/*
* Copyright 2016 Facebook, Inc.
*
* @author Eric Niebler (eniebler@fb.com), Sven Over (over@fb.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Acknowledgements: Giuseppe Ottaviano (ott@fb.com)
*/
/**
* @class Function
*
* @brief A polymorphic function wrapper that is not copyable and does not
* require the wrapped function to be copy constructible.
*
* `folly::Function` is a polymorphic function wrapper, similar to
* `std::function`. The template parameters of the `folly::Function` define
* the parameter signature of the wrapped callable, but not the specific
* type of the embedded callable. E.g. a `folly::Function<int(int)>`
* can wrap callables that return an `int` when passed an `int`. This can be a
* function pointer or any class object implementing one or both of
*
* int operator()(int);
* int operator()(int) const;
*
* If both are defined, the non-const one takes precedence.
*
* Unlike `std::function`, a `folly::Function` can wrap objects that are not
* copy constructible. As a consequence of this, `folly::Function` itself
* is not copyable, either.
*
* Another difference is that, unlike `std::function`, `folly::Function` treats
* const-ness of methods correctly. While a `std::function` allows you to wrap
* an object that only implements a non-const `operator()` and still invoke it
* through a const reference to the `std::function`, `folly::Function` requires
* you to declare a function type as const in order to be able to execute it
* on a const reference.
*
* For example:
*
* class Foo {
* public:
* void operator()() {
* // mutates the Foo object
* }
* };
*
* class Bar {
* std::function<void(void)> foo_; // wraps a Foo object
* public:
* void mutateFoo() const
* {
* foo_();
* }
* };
*
* Even though `mutateFoo` is a const method and can therefore only reference
* `foo_` as const, it is still able to call the non-const `operator()` of the
* Foo object that is embedded in the `foo_` function.
*
* `folly::Function` will not allow you to do that. You will have to decide
* whether you need to invoke your wrapped callable from a const reference
* (like in the example above), in which case it will only wrap a
* `operator() const`. If your functor does not implement that,
* compilation will fail. If you do not need to invoke the
* wrapped function in a const context, you can wrap any functor that
* implements either or both of const and non-const `operator()`.
*
* The template parameter of `folly::Function`, the `FunctionType`, can be
* const-qualified. Be aware that the const is part of the function signature.
* It does not mean that the function type is a const type.
*
* using FunctionType = R(Args...);
* using ConstFunctionType = R(Args...) const;
*
* In this example, `FunctionType` and `ConstFunctionType` are different
* types. `ConstFunctionType` is not the same as `const FunctionType`.
* As a matter of fact, trying to use the latter should emit a compiler
* warning or error, because it has no defined meaning.
*
* // This will not compile:
* folly::Function<void(void) const> func = Foo();
* // because Foo does not have a member function of the form:
* // void operator()() const;
*
* // This will compile just fine:
* folly::Function<void(void)> func = Foo();
* // and it will wrap the existing member function:
* // void operator()();
*
* When should a const function type be used? As a matter of fact, you will
* probably not need to use const function types very often. See the following
* example:
*
* class Bar {
* folly::Function<void()> func_;
* folly::Function<void() const> constFunc_;
*
* void someMethod() {
* // Can call func_.
* func_();
* // Can call constFunc_.
* constFunc_();
* }
*
* void someConstMethod() const {
* // Can call constFunc_.
* constFunc_();
* // However, cannot call func_ because a non-const method cannot
* // be called from a const one.
* }
* };
*
* As you can see, whether the `folly::Function`'s function type should
* be declared const or not is identical to whether a corresponding method
* would be declared const or not.
*
* You only need a `folly::Function` with a const function type if you
* intend to invoke it from within a const context. This is to ensure that
* you cannot mutate its inner state when calling in a const context.
*
* This is how the const/non-const choice relates to lambda functions:
*
* // Non-mutable lambdas: can be stored in a non-const...
* folly::Function<void(int)> print_number =
* [] (int number) { std::cout << number << std::endl; };
*
* // ...as well as in a const folly::Function
* folly::Function<void(int) const> print_number_const =
* [] (int number) { std::cout << number << std::endl; };
*
* // Mutable lambda: can only be stored in a non-const folly::Function:
* int number = 0;
* folly::Function<void()> print_number =
* [number] () mutable { std::cout << ++number << std::endl; };
* // Trying to store the above mutable lambda in a
* // `folly::Function<void() const>` would lead to a compiler error:
* // error: no viable conversion from '(lambda at ...)' to
* // 'folly::Function<void () const>'
*
* Casting between const and non-const `folly::Function`s:
* conversion from const to non-const signatures happens implicitly. Any
* function that takes a `folly::Function<R(Args...)>` can be passed
* a `folly::Function<R(Args...) const>` without explicit conversion.
* This is safe, because casting from const to non-const only entails giving
* up the ability to invoke the function from a const context.
* Casting from a non-const to a const signature is potentially dangerous,
* as it means that a function that may change its inner state when invoked
* is made possible to call from a const context. Therefore this cast does
* not happen implicitly. The function `folly::constCastFunction` can
* be used to perform the cast.
*
* // Mutable lambda: can only be stored in a non-const folly::Function:
* int number = 0;
* folly::Function<void()> print_number =
* [number] () mutable { std::cout << ++number << std::endl; };
*
* // const-cast to a const folly::Function:
* folly::Function<void() const> print_number_const =
* constCastFunction(std::move(print_number));
*
* When to use const function types?
* Generally, only when you need them. When you use a `folly::Function` as a
* member of a struct or class, only use a const function signature when you
* need to invoke the function from const context.
* When passing a `folly::Function` to a function, the function should accept
* a non-const `folly::Function` whenever possible, i.e. when it does not
* need to pass on or store a const `folly::Function`. This is the least
* possible constraint: you can always pass a const `folly::Function` when
* the function accepts a non-const one.
*
* How does the const behaviour compare to `std::function`?
* `std::function` can wrap objects with non-const invocation behaviour but
* exposes them as const. The equivalent behaviour can be achieved with
* `folly::Function` like so:
*
* std::function<void(void)> stdfunc = someCallable;
*
* folly::Function<void(void) const> uniqfunc = constCastFunction(
* folly::Function<void(void)>(someCallable)
* );
*
* You need to wrap the callable first in a non-const `folly::Function` to
* select a non-const invoke operator (or the const one if no non-const one is
* present), and then move it into a const `folly::Function` using
* `constCastFunction`.
* The name of `constCastFunction` should warn you that something
* potentially dangerous is happening. As a matter of fact, using
* `std::function` always involves this potentially dangerous aspect, which
* is why it is not considered fully const-safe or even const-correct.
* However, in most of the cases you will not need the dangerous aspect at all.
* Either you do not require invocation of the function from a const context,
* in which case you do not need to use `constCastFunction` and just
* use the inner `folly::Function` in the example above, i.e. just use a
* non-const `folly::Function`. Or, you may need invocation from const, but
* the callable you are wrapping does not mutate its state (e.g. it is a class
* object and implements `operator() const`, or it is a normal,
* non-mutable lambda), in which case you can wrap the callable in a const
* `folly::Function` directly, without using `constCastFunction`.
* Only if you need to invoke, from a const context, a callable that
* may mutate itself when invoked do you have to go through the above procedure.
* However, in that case what you do is potentially dangerous and requires
* the equivalent of a `const_cast`, hence you need to call
* `constCastFunction`.
*/
#pragma once
#include <functional>
#include <memory>
#include <new>
#include <type_traits>
#include <utility>
#include <folly/CppAttributes.h>
#include <folly/Portability.h>
namespace folly {
template <typename FunctionType>
class Function;
template <typename ReturnType, typename... Args>
Function<ReturnType(Args...) const> constCastFunction(
Function<ReturnType(Args...)>&&) noexcept;
namespace detail {
namespace function {
enum class Op { MOVE, NUKE, FULL, HEAP };
union Data {
void* big;
std::aligned_storage<6 * sizeof(void*)>::type tiny;
};
template <typename Fun, typename FunT = typename std::decay<Fun>::type>
using IsSmall = std::integral_constant<
bool,
(sizeof(FunT) <= sizeof(Data::tiny) &&
// Same as is_nothrow_move_constructible, but w/ no template instantiation.
noexcept(FunT(std::declval<FunT&&>())))>;
using SmallTag = std::true_type;
using HeapTag = std::false_type;
struct CoerceTag {};
template <typename T>
bool isNullPtrFn(T* p) {
return p == nullptr;
}
template <typename T>
std::false_type isNullPtrFn(T&&) {
return {};
}
inline bool uninitNoop(Op, Data*, Data*) {
return false;
}
template <typename FunctionType>
struct FunctionTraits;
template <typename ReturnType, typename... Args>
struct FunctionTraits<ReturnType(Args...)> {
using Call = ReturnType (*)(Data&, Args&&...);
using IsConst = std::false_type;
using ConstSignature = ReturnType(Args...) const;
using NonConstSignature = ReturnType(Args...);
using OtherSignature = ConstSignature;
template <typename F, typename G = typename std::decay<F>::type>
using ResultOf = decltype(
static_cast<ReturnType>(std::declval<G&>()(std::declval<Args>()...)));
template <typename Fun>
static ReturnType callSmall(Data& p, Args&&... args) {
return static_cast<ReturnType>((*static_cast<Fun*>(
static_cast<void*>(&p.tiny)))(static_cast<Args&&>(args)...));
}
template <typename Fun>
static ReturnType callBig(Data& p, Args&&... args) {
return static_cast<ReturnType>(
(*static_cast<Fun*>(p.big))(static_cast<Args&&>(args)...));
}
static ReturnType uninitCall(Data&, Args&&...) {
throw std::bad_function_call();
}
ReturnType operator()(Args... args) {
auto& fn = *static_cast<Function<ReturnType(Args...)>*>(this);
return fn.call_(fn.data_, static_cast<Args&&>(args)...);
}
class SharedProxy {
std::shared_ptr<Function<ReturnType(Args...)>> sp_;
public:
explicit SharedProxy(Function<ReturnType(Args...)>&& func)
: sp_(std::make_shared<Function<ReturnType(Args...)>>(
std::move(func))) {}
ReturnType operator()(Args&&... args) const {
return (*sp_)(static_cast<Args&&>(args)...);
}
};
};
template <typename ReturnType, typename... Args>
struct FunctionTraits<ReturnType(Args...) const> {
using Call = ReturnType (*)(Data&, Args&&...);
using IsConst = std::true_type;
using ConstSignature = ReturnType(Args...) const;
using NonConstSignature = ReturnType(Args...);
using OtherSignature = NonConstSignature;
template <typename F, typename G = typename std::decay<F>::type>
using ResultOf = decltype(static_cast<ReturnType>(
std::declval<const G&>()(std::declval<Args>()...)));
template <typename Fun>
static ReturnType callSmall(Data& p, Args&&... args) {
return static_cast<ReturnType>((*static_cast<const Fun*>(
static_cast<void*>(&p.tiny)))(static_cast<Args&&>(args)...));
}
template <typename Fun>
static ReturnType callBig(Data& p, Args&&... args) {
return static_cast<ReturnType>(
(*static_cast<const Fun*>(p.big))(static_cast<Args&&>(args)...));
}
static ReturnType uninitCall(Data&, Args&&...) {
throw std::bad_function_call();
}
ReturnType operator()(Args... args) const {
auto& fn = *static_cast<const Function<ReturnType(Args...) const>*>(this);
return fn.call_(fn.data_, static_cast<Args&&>(args)...);
}
struct SharedProxy {
std::shared_ptr<Function<ReturnType(Args...) const>> sp_;
public:
explicit SharedProxy(Function<ReturnType(Args...) const>&& func)
: sp_(std::make_shared<Function<ReturnType(Args...) const>>(
std::move(func))) {}
ReturnType operator()(Args&&... args) const {
return (*sp_)(static_cast<Args&&>(args)...);
}
};
};
template <typename Fun>
bool execSmall(Op o, Data* src, Data* dst) {
switch (o) {
case Op::MOVE:
::new (static_cast<void*>(&dst->tiny))
Fun(std::move(*static_cast<Fun*>(static_cast<void*>(&src->tiny))));
FOLLY_FALLTHROUGH;
case Op::NUKE:
static_cast<Fun*>(static_cast<void*>(&src->tiny))->~Fun();
break;
case Op::FULL:
return true;
case Op::HEAP:
break;
}
return false;
}
template <typename Fun>
bool execBig(Op o, Data* src, Data* dst) {
switch (o) {
case Op::MOVE:
dst->big = src->big;
src->big = nullptr;
break;
case Op::NUKE:
delete static_cast<Fun*>(src->big);
break;
case Op::FULL:
case Op::HEAP:
break;
}
return true;
}
// Invoke helper
template <typename F, typename... Args>
inline auto invoke(F&& f, Args&&... args)
-> decltype(std::forward<F>(f)(std::forward<Args>(args)...)) {
return std::forward<F>(f)(std::forward<Args>(args)...);
}
template <typename M, typename C, typename... Args>
inline auto invoke(M(C::*d), Args&&... args)
-> decltype(std::mem_fn(d)(std::forward<Args>(args)...)) {
return std::mem_fn(d)(std::forward<Args>(args)...);
}
} // namespace function
} // namespace detail
FOLLY_PUSH_WARNING
FOLLY_MSVC_DISABLE_WARNING(4521) // Multiple copy constructors
FOLLY_MSVC_DISABLE_WARNING(4522) // Multiple assignment operators
template <typename FunctionType>
class Function final : private detail::function::FunctionTraits<FunctionType> {
// These utility types are defined outside of the template to reduce
// the number of instantiations, and then imported in the class
// namespace for convenience.
using Data = detail::function::Data;
using Op = detail::function::Op;
using SmallTag = detail::function::SmallTag;
using HeapTag = detail::function::HeapTag;
using CoerceTag = detail::function::CoerceTag;
using Traits = detail::function::FunctionTraits<FunctionType>;
using Call = typename Traits::Call;
using Exec = bool (*)(Op, Data*, Data*);
template <typename Fun>
using IsSmall = detail::function::IsSmall<Fun>;
using OtherSignature = typename Traits::OtherSignature;
// The `data_` member is mutable to allow `constCastFunction` to work without
// invoking undefined behavior. Const-correctness is only violated when
// `FunctionType` is a const function type (e.g., `int() const`) and `*this`
// is the result of calling `constCastFunction`.
mutable Data data_;
Call call_{&Traits::uninitCall};
Exec exec_{&detail::function::uninitNoop};
friend Traits;
friend Function<typename Traits::ConstSignature> folly::constCastFunction<>(
Function<typename Traits::NonConstSignature>&&) noexcept;
friend class Function<OtherSignature>;
template <typename Fun>
Function(Fun&& fun, SmallTag) noexcept {
using FunT = typename std::decay<Fun>::type;
if (!detail::function::isNullPtrFn(fun)) {
::new (static_cast<void*>(&data_.tiny)) FunT(static_cast<Fun&&>(fun));
call_ = &Traits::template callSmall<FunT>;
exec_ = &detail::function::execSmall<FunT>;
}
}
template <typename Fun>
Function(Fun&& fun, HeapTag) {
using FunT = typename std::decay<Fun>::type;
data_.big = new FunT(static_cast<Fun&&>(fun));
call_ = &Traits::template callBig<FunT>;
exec_ = &detail::function::execBig<FunT>;
}
Function(Function<OtherSignature>&& that, CoerceTag) noexcept {
that.exec_(Op::MOVE, &that.data_, &data_);
std::swap(call_, that.call_);
std::swap(exec_, that.exec_);
}
public:
/**
* Default constructor. Constructs an empty Function.
*/
Function() = default;
// not copyable
// NOTE: Deleting the non-const copy constructor is unusual but necessary to
// prevent copies from non-const `Function` object from selecting the
// perfect forwarding implicit converting constructor below
// (i.e., `template <typename Fun> Function(Fun&&)`).
Function(Function&) = delete;
Function(const Function&) = delete;
Function(const Function&&) = delete;
/**
* Move constructor
*/
Function(Function&& that) noexcept {
that.exec_(Op::MOVE, &that.data_, &data_);
std::swap(call_, that.call_);
std::swap(exec_, that.exec_);
}
/**
* Constructs an empty `Function`.
*/
/* implicit */ Function(std::nullptr_t) noexcept {}
/**
* Constructs a new `Function` from any callable object. This
* handles function pointers, pointers to static member functions,
* `std::reference_wrapper` objects, `std::function` objects, and arbitrary
* objects that implement `operator()` if the parameter signature
* matches (i.e. it returns R when called with Args...).
* For a `Function` with a const function type, the object must be
* callable from a const-reference, i.e. implement `operator() const`.
* For a `Function` with a non-const function type, the object will
* be called from a non-const reference, which means that it will execute
* a non-const `operator()` if it is defined, and falls back to
* `operator() const` otherwise.
*
* \note `typename = ResultOf<Fun>` prevents this overload from being
* selected by overload resolution when `fun` is not a compatible function.
*/
template <class Fun, typename = typename Traits::template ResultOf<Fun>>
/* implicit */ Function(Fun&& fun) noexcept(IsSmall<Fun>::value)
: Function(static_cast<Fun&&>(fun), IsSmall<Fun>{}) {}
/**
* For moving a `Function<X(Ys...) const>` into a `Function<X(Ys...)>`.
*/
template <
bool Const = Traits::IsConst::value,
typename std::enable_if<!Const, int>::type = 0>
Function(Function<OtherSignature>&& that) noexcept
: Function(std::move(that), CoerceTag{}) {}
/**
* If `ptr` is null, constructs an empty `Function`. Otherwise,
* this constructor is equivalent to `Function(std::mem_fn(ptr))`.
*/
template <
typename Member,
typename Class,
// Prevent this overload from being selected when `ptr` is not a
// compatible member function pointer.
typename = decltype(Function(std::mem_fn((Member Class::*)0)))>
/* implicit */ Function(Member Class::*ptr) noexcept {
if (ptr) {
*this = std::mem_fn(ptr);
}
}
~Function() {
exec_(Op::NUKE, &data_, nullptr);
}
Function& operator=(Function&) = delete;
Function& operator=(const Function&) = delete;
/**
* Move assignment operator
*/
Function& operator=(Function&& that) noexcept {
if (&that != this) {
// Q: Why is it safe to destroy and reconstruct this object in place?
// A: Two reasons: First, `Function` is a final class, so in doing this
// we aren't slicing off any derived parts. And second, the move
// operation is guaranteed not to throw so we always leave the object
// in a valid state.
this->~Function();
::new (this) Function(std::move(that));
}
return *this;
}
/**
* Assigns a callable object to this `Function`. If the operation fails,
* `*this` is left unmodified.
*
* \note `typename = ResultOf<Fun>` prevents this overload from being
* selected by overload resolution when `fun` is not a compatible function.
*/
template <class Fun, typename = typename Traits::template ResultOf<Fun>>
Function& operator=(Fun&& fun) noexcept(
noexcept(/* implicit */ Function(std::declval<Fun>()))) {
// Doing this in place is more efficient when we can do so safely.
if (noexcept(/* implicit */ Function(std::declval<Fun>()))) {
// Q: Why is it safe to destroy and reconstruct this object in place?
// A: See the explanation in the move assignment operator.
this->~Function();
::new (this) Function(static_cast<Fun&&>(fun));
} else {
// Construct a temporary and (nothrow) swap.
Function(static_cast<Fun&&>(fun)).swap(*this);
}
return *this;
}
/**
* Clears this `Function`.
*/
Function& operator=(std::nullptr_t) noexcept {
return (*this = Function());
}
/**
* If `ptr` is null, clears this `Function`. Otherwise, this assignment
* operator is equivalent to `*this = std::mem_fn(ptr)`.
*/
template <typename Member, typename Class>
auto operator=(Member Class::*ptr) noexcept
// Prevent this overload from being selected when `ptr` is not a
// compatible member function pointer.
-> decltype(operator=(std::mem_fn(ptr))) {
return ptr ? (*this = std::mem_fn(ptr)) : (*this = Function());
}
/**
* Call the wrapped callable object with the specified arguments.
*/
using Traits::operator();
/**
* Exchanges the callable objects of `*this` and `that`.
*/
void swap(Function& that) noexcept {
std::swap(*this, that);
}
/**
* Returns `true` if this `Function` contains a callable, i.e. is
* non-empty.
*/
explicit operator bool() const noexcept {
return exec_(Op::FULL, nullptr, nullptr);
}
/**
* Returns `true` if this `Function` stores the callable on the
* heap. If `false` is returned, there has been no additional memory
* allocation and the callable is stored inside the `Function`
* object itself.
*/
bool hasAllocatedMemory() const noexcept {
return exec_(Op::HEAP, nullptr, nullptr);
}
using typename Traits::SharedProxy;
/**
* Move this `Function` into a copyable callable object, of which all copies
* share the state.
*/
SharedProxy asSharedProxy() && {
return SharedProxy{std::move(*this)};
}
/**
* Construct a `std::function` by moving in the contents of this `Function`.
* Note that the returned `std::function` will share its state (i.e. captured
* data) across all copies you make of it, so be very careful when copying.
*/
std::function<typename Traits::NonConstSignature> asStdFunction() && {
return std::move(*this).asSharedProxy();
}
};
FOLLY_POP_WARNING
template <typename FunctionType>
void swap(Function<FunctionType>& lhs, Function<FunctionType>& rhs) noexcept {
lhs.swap(rhs);
}
template <typename FunctionType>
bool operator==(const Function<FunctionType>& fn, std::nullptr_t) {
return !fn;
}
template <typename FunctionType>
bool operator==(std::nullptr_t, const Function<FunctionType>& fn) {
return !fn;
}
template <typename FunctionType>
bool operator!=(const Function<FunctionType>& fn, std::nullptr_t) {
return !(fn == nullptr);
}
template <typename FunctionType>
bool operator!=(std::nullptr_t, const Function<FunctionType>& fn) {
return !(nullptr == fn);
}
/**
* NOTE: See detailed note about `constCastFunction` at the top of the file.
* This is potentially dangerous and requires the equivalent of a `const_cast`.
*/
template <typename ReturnType, typename... Args>
Function<ReturnType(Args...) const> constCastFunction(
Function<ReturnType(Args...)>&& that) noexcept {
return Function<ReturnType(Args...) const>{std::move(that),
detail::function::CoerceTag{}};
}
template <typename ReturnType, typename... Args>
Function<ReturnType(Args...) const> constCastFunction(
Function<ReturnType(Args...) const>&& that) noexcept {
return std::move(that);
}
/**
* @class FunctionRef
*
* @brief A reference wrapper for callable objects
*
* FunctionRef is similar to std::reference_wrapper, but the template parameter
* is the function signature type rather than the type of the referenced object.
* A folly::FunctionRef is cheap to construct as it contains only a pointer to
* the referenced callable and a pointer to a function which invokes the
* callable.
*
* The user of FunctionRef must be aware of the reference semantics: storing a
* copy of a FunctionRef is potentially dangerous and should be avoided unless
* the referenced object definitely outlives the FunctionRef object. Thus any
* function that accepts a FunctionRef parameter should only use it to invoke
* the referenced function and not store a copy of it. Knowing that FunctionRef
* itself has reference semantics, it is generally okay to use it to reference
* lambdas that capture by reference.
*/
template <typename FunctionType>
class FunctionRef;
template <typename ReturnType, typename... Args>
class FunctionRef<ReturnType(Args...)> final {
using Call = ReturnType (*)(void*, Args&&...);
void* object_{nullptr};
Call call_{&FunctionRef::uninitCall};
static ReturnType uninitCall(void*, Args&&...) {
throw std::bad_function_call();
}
template <typename Fun>
static ReturnType call(void* object, Args&&... args) {
return static_cast<ReturnType>(detail::function::invoke(
*static_cast<Fun*>(object), static_cast<Args&&>(args)...));
}
public:
/**
* Default constructor. Constructs an empty FunctionRef.
*
* Invoking it will throw std::bad_function_call.
*/
FunctionRef() = default;
/**
* Construct a FunctionRef from a reference to a callable object.
*/
template <typename Fun>
/* implicit */ FunctionRef(Fun&& fun) noexcept {
using ReferencedType = typename std::remove_reference<Fun>::type;
static_assert(
std::is_convertible<
typename std::result_of<ReferencedType&(Args && ...)>::type,
ReturnType>::value,
"FunctionRef cannot be constructed from object with "
"incompatible function signature");
// `Fun` may be a const type, in which case we have to do a const_cast
// to store the address in a `void*`. This is safe because the `void*`
// will be cast back to `Fun*` (which is a const pointer whenever `Fun`
// is a const type) inside `FunctionRef::call`
object_ = const_cast<void*>(static_cast<void const*>(std::addressof(fun)));
call_ = &FunctionRef::call<ReferencedType>;
}
ReturnType operator()(Args... args) const {
return call_(object_, static_cast<Args&&>(args)...);
}
};
} // namespace folly
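A minimal sketch of the behaviour documented above, assuming the vendored folly/Function.h and a C++14 compiler: a move-only callable owned by folly::Function, a non-mutable lambda stored behind a const function type, and FunctionRef used as a cheap, non-owning callable parameter:

#include <iostream>
#include <memory>
#include <folly/Function.h>

// Non-owning callable parameter: cheap to pass, must not outlive the callee.
static int applyTwice(folly::FunctionRef<int(int)> f, int x) {
  return f(f(x));
}

int main() {
  // Unlike std::function, folly::Function can own a move-only callable.
  auto p = std::make_unique<int>(21);
  folly::Function<int()> owner = [q = std::move(p)] { return *q * 2; };
  std::cout << owner() << "\n";  // 42

  // A non-mutable lambda can be stored behind a const function type and
  // invoked from const contexts.
  folly::Function<int() const> pure = [] { return 7; };
  std::cout << pure() << "\n";   // 7

  // The temporary lambda outlives the call expression, so FunctionRef is safe here.
  std::cout << applyTwice([](int x) { return x + 1; }, 0) << "\n";  // 2
  return 0;
}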

622
ios/Pods/Folly/folly/GroupVarint.h generated Normal file
View File

@ -0,0 +1,622 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#if !defined(__GNUC__) && !defined(_MSC_VER)
#error GroupVarint.h requires GCC or MSVC
#endif
#include <folly/Portability.h>
#if FOLLY_X64 || defined(__i386__) || FOLLY_PPC64 || FOLLY_A64
#define HAVE_GROUP_VARINT 1
#include <cstdint>
#include <limits>
#include <folly/detail/GroupVarintDetail.h>
#include <folly/Bits.h>
#include <folly/Range.h>
#include <folly/portability/Builtins.h>
#include <glog/logging.h>
#if FOLLY_SSE >= 3
#include <nmmintrin.h>
namespace folly {
namespace detail {
alignas(16) extern const uint64_t groupVarintSSEMasks[];
} // namespace detail
} // namespace folly
#endif
namespace folly {
namespace detail {
extern const uint8_t groupVarintLengths[];
} // namespace detail
} // namespace folly
namespace folly {
template <typename T>
class GroupVarint;
/**
* GroupVarint encoding for 32-bit values.
*
* Encodes 4 32-bit integers at once, each using 1-4 bytes depending on size.
* There is one byte of overhead. (The first byte contains the lengths of
* the four integers encoded as two bits each; 00=1 byte .. 11=4 bytes)
*
* This implementation assumes little-endian and does unaligned 32-bit
* accesses, so it's basically not portable outside of the x86[_64] world.
*/
template <>
class GroupVarint<uint32_t> : public detail::GroupVarintBase<uint32_t> {
public:
/**
* Return the number of bytes used to encode these four values.
*/
static size_t size(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
return kHeaderSize + kGroupSize + key(a) + key(b) + key(c) + key(d);
}
/**
* Return the number of bytes used to encode four uint32_t values stored
* at consecutive positions in an array.
*/
static size_t size(const uint32_t* p) {
return size(p[0], p[1], p[2], p[3]);
}
/**
* Return the number of bytes used to encode count (<= 4) values.
* If you clip a buffer after this many bytes, you can still decode
* the first "count" values correctly (if the remaining size() -
* partialSize() bytes are filled with garbage).
*/
static size_t partialSize(const type* p, size_t count) {
DCHECK_LE(count, kGroupSize);
size_t s = kHeaderSize + count;
for (; count; --count, ++p) {
s += key(*p);
}
return s;
}
/**
* Return the number of values from *p that are valid from an encoded
* buffer of size bytes.
*/
static size_t partialCount(const char* p, size_t size) {
char v = *p;
size_t s = kHeaderSize;
s += 1 + b0key(v);
if (s > size) return 0;
s += 1 + b1key(v);
if (s > size) return 1;
s += 1 + b2key(v);
if (s > size) return 2;
s += 1 + b3key(v);
if (s > size) return 3;
return 4;
}
/**
* Given a pointer to the beginning of a GroupVarint32-encoded block,
* return the number of bytes used by the encoding.
*/
static size_t encodedSize(const char* p) {
return (kHeaderSize + kGroupSize +
b0key(*p) + b1key(*p) + b2key(*p) + b3key(*p));
}
/**
* Encode four uint32_t values into the buffer pointed-to by p, and return
* the next position in the buffer (that is, one character past the last
* encoded byte). p needs to have at least size()+4 bytes available.
*/
static char* encode(char* p, uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
uint8_t b0key = key(a);
uint8_t b1key = key(b);
uint8_t b2key = key(c);
uint8_t b3key = key(d);
*p++ = (b3key << 6) | (b2key << 4) | (b1key << 2) | b0key;
storeUnaligned(p, a);
p += b0key+1;
storeUnaligned(p, b);
p += b1key+1;
storeUnaligned(p, c);
p += b2key+1;
storeUnaligned(p, d);
p += b3key+1;
return p;
}
/**
* Encode four uint32_t values from the array pointed-to by src into the
* buffer pointed-to by p, similar to encode(p,a,b,c,d) above.
*/
static char* encode(char* p, const uint32_t* src) {
return encode(p, src[0], src[1], src[2], src[3]);
}
/**
* Decode four uint32_t values from a buffer, and return the next position
* in the buffer (that is, one character past the last encoded byte).
* The buffer needs to have at least 3 extra bytes available (they
* may be read but ignored).
*/
static const char* decode_simple(const char* p, uint32_t* a, uint32_t* b,
uint32_t* c, uint32_t* d) {
size_t k = loadUnaligned<uint8_t>(p);
const char* end = p + detail::groupVarintLengths[k];
++p;
size_t k0 = b0key(k);
*a = loadUnaligned<uint32_t>(p) & kMask[k0];
p += k0+1;
size_t k1 = b1key(k);
*b = loadUnaligned<uint32_t>(p) & kMask[k1];
p += k1+1;
size_t k2 = b2key(k);
*c = loadUnaligned<uint32_t>(p) & kMask[k2];
p += k2+1;
size_t k3 = b3key(k);
*d = loadUnaligned<uint32_t>(p) & kMask[k3];
// p += k3+1;
return end;
}
/**
* Decode four uint32_t values from a buffer and store them in the array
* pointed-to by dest, similar to decode(p,a,b,c,d) above.
*/
static const char* decode_simple(const char* p, uint32_t* dest) {
return decode_simple(p, dest, dest+1, dest+2, dest+3);
}
#if FOLLY_SSE >= 3
/**
* Just like the non-SSSE3 decode below, but with the additional constraint
* that we must be able to read at least 17 bytes from the input pointer, p.
*/
static const char* decode(const char* p, uint32_t* dest) {
uint8_t key = p[0];
__m128i val = _mm_loadu_si128((const __m128i*)(p+1));
__m128i mask =
_mm_load_si128((const __m128i*)&detail::groupVarintSSEMasks[key * 2]);
__m128i r = _mm_shuffle_epi8(val, mask);
_mm_storeu_si128((__m128i*)dest, r);
return p + detail::groupVarintLengths[key];
}
/**
* Just like decode_simple, but with the additional constraint that
* we must be able to read at least 17 bytes from the input pointer, p.
*/
static const char* decode(const char* p, uint32_t* a, uint32_t* b,
uint32_t* c, uint32_t* d) {
uint8_t key = p[0];
__m128i val = _mm_loadu_si128((const __m128i*)(p+1));
__m128i mask =
_mm_load_si128((const __m128i*)&detail::groupVarintSSEMasks[key * 2]);
__m128i r = _mm_shuffle_epi8(val, mask);
// Extracting 32 bits at a time out of an XMM register is a SSE4 feature
#if FOLLY_SSE >= 4
*a = _mm_extract_epi32(r, 0);
*b = _mm_extract_epi32(r, 1);
*c = _mm_extract_epi32(r, 2);
*d = _mm_extract_epi32(r, 3);
#else /* !__SSE4__ */
*a = _mm_extract_epi16(r, 0) + (_mm_extract_epi16(r, 1) << 16);
*b = _mm_extract_epi16(r, 2) + (_mm_extract_epi16(r, 3) << 16);
*c = _mm_extract_epi16(r, 4) + (_mm_extract_epi16(r, 5) << 16);
*d = _mm_extract_epi16(r, 6) + (_mm_extract_epi16(r, 7) << 16);
#endif /* __SSE4__ */
return p + detail::groupVarintLengths[key];
}
#else /* !__SSSE3__ */
static const char* decode(const char* p, uint32_t* a, uint32_t* b,
uint32_t* c, uint32_t* d) {
return decode_simple(p, a, b, c, d);
}
static const char* decode(const char* p, uint32_t* dest) {
return decode_simple(p, dest);
}
#endif /* __SSSE3__ */
private:
static uint8_t key(uint32_t x) {
// __builtin_clz is undefined for the x==0 case
return 3 - (__builtin_clz(x|1) / 8);
}
static size_t b0key(size_t x) { return x & 3; }
static size_t b1key(size_t x) { return (x >> 2) & 3; }
static size_t b2key(size_t x) { return (x >> 4) & 3; }
static size_t b3key(size_t x) { return (x >> 6) & 3; }
static const uint32_t kMask[];
};
/**
* GroupVarint encoding for 64-bit values.
*
* Encodes 5 64-bit integers at once, each using 1-8 bytes depending on size.
* There are two bytes of overhead. (The first two bytes contain the lengths
* of the five integers encoded as three bits each; 000=1 byte .. 111 = 8 bytes)
*
* This implementation assumes little-endian and does unaligned 64-bit
* accesses, so it's basically not portable outside of the x86[_64] world.
*/
template <>
class GroupVarint<uint64_t> : public detail::GroupVarintBase<uint64_t> {
public:
/**
* Return the number of bytes used to encode these five values.
*/
static size_t size(uint64_t a, uint64_t b, uint64_t c, uint64_t d,
uint64_t e) {
return (kHeaderSize + kGroupSize +
key(a) + key(b) + key(c) + key(d) + key(e));
}
/**
* Return the number of bytes used to encode five uint64_t values stored
* at consecutive positions in an array.
*/
static size_t size(const uint64_t* p) {
return size(p[0], p[1], p[2], p[3], p[4]);
}
/**
* Return the number of bytes used to encode count (<= 5) values.
* If you clip a buffer after this many bytes, you can still decode
* the first "count" values correctly (if the remaining size() -
* partialSize() bytes are filled with garbage).
*/
static size_t partialSize(const type* p, size_t count) {
DCHECK_LE(count, kGroupSize);
size_t s = kHeaderSize + count;
for (; count; --count, ++p) {
s += key(*p);
}
return s;
}
/**
* Return the number of values from *p that are valid from an encoded
* buffer of size bytes.
*/
static size_t partialCount(const char* p, size_t size) {
uint16_t v = loadUnaligned<uint16_t>(p);
size_t s = kHeaderSize;
s += 1 + b0key(v);
if (s > size) return 0;
s += 1 + b1key(v);
if (s > size) return 1;
s += 1 + b2key(v);
if (s > size) return 2;
s += 1 + b3key(v);
if (s > size) return 3;
s += 1 + b4key(v);
if (s > size) return 4;
return 5;
}
/**
* Given a pointer to the beginning of a GroupVarint64-encoded block,
* return the number of bytes used by the encoding.
*/
static size_t encodedSize(const char* p) {
uint16_t n = loadUnaligned<uint16_t>(p);
return (kHeaderSize + kGroupSize +
b0key(n) + b1key(n) + b2key(n) + b3key(n) + b4key(n));
}
/**
* Encode five uint64_t values into the buffer pointed-to by p, and return
* the next position in the buffer (that is, one character past the last
* encoded byte). p needs to have at least size()+8 bytes available.
*/
static char* encode(char* p, uint64_t a, uint64_t b, uint64_t c,
uint64_t d, uint64_t e) {
uint8_t b0key = key(a);
uint8_t b1key = key(b);
uint8_t b2key = key(c);
uint8_t b3key = key(d);
uint8_t b4key = key(e);
storeUnaligned<uint16_t>(
p,
(b4key << 12) | (b3key << 9) | (b2key << 6) | (b1key << 3) | b0key);
p += 2;
storeUnaligned(p, a);
p += b0key+1;
storeUnaligned(p, b);
p += b1key+1;
storeUnaligned(p, c);
p += b2key+1;
storeUnaligned(p, d);
p += b3key+1;
storeUnaligned(p, e);
p += b4key+1;
return p;
}
/**
* Encode five uint64_t values from the array pointed-to by src into the
* buffer pointed-to by p, similar to encode(p,a,b,c,d,e) above.
*/
static char* encode(char* p, const uint64_t* src) {
return encode(p, src[0], src[1], src[2], src[3], src[4]);
}
/**
* Decode five uint64_t values from a buffer, and return the next position
* in the buffer (that is, one character past the last encoded byte).
* The buffer needs to have at least 7 extra bytes available (they may be read
* but ignored).
*/
static const char* decode(const char* p, uint64_t* a, uint64_t* b,
uint64_t* c, uint64_t* d, uint64_t* e) {
uint16_t k = loadUnaligned<uint16_t>(p);
p += 2;
uint8_t k0 = b0key(k);
*a = loadUnaligned<uint64_t>(p) & kMask[k0];
p += k0+1;
uint8_t k1 = b1key(k);
*b = loadUnaligned<uint64_t>(p) & kMask[k1];
p += k1+1;
uint8_t k2 = b2key(k);
*c = loadUnaligned<uint64_t>(p) & kMask[k2];
p += k2+1;
uint8_t k3 = b3key(k);
*d = loadUnaligned<uint64_t>(p) & kMask[k3];
p += k3+1;
uint8_t k4 = b4key(k);
*e = loadUnaligned<uint64_t>(p) & kMask[k4];
p += k4+1;
return p;
}
/**
* Decode five uint64_t values from a buffer and store them in the array
* pointed-to by dest, similar to decode(p,a,b,c,d,e) above.
*/
static const char* decode(const char* p, uint64_t* dest) {
return decode(p, dest, dest+1, dest+2, dest+3, dest+4);
}
private:
enum { kHeaderBytes = 2 };
static uint8_t key(uint64_t x) {
// __builtin_clzll is undefined for the x==0 case
return 7 - (__builtin_clzll(x|1) / 8);
}
static uint8_t b0key(uint16_t x) { return x & 7; }
static uint8_t b1key(uint16_t x) { return (x >> 3) & 7; }
static uint8_t b2key(uint16_t x) { return (x >> 6) & 7; }
static uint8_t b3key(uint16_t x) { return (x >> 9) & 7; }
static uint8_t b4key(uint16_t x) { return (x >> 12) & 7; }
static const uint64_t kMask[];
};
typedef GroupVarint<uint32_t> GroupVarint32;
typedef GroupVarint<uint64_t> GroupVarint64;
/**
* Simplify use of GroupVarint* for the case where data is available one
* entry at a time (instead of one group at a time). Handles buffering
* and an incomplete last chunk.
*
* Output is a function object that accepts character ranges:
* out(StringPiece) appends the given character range to the output.
*/
template <class T, class Output>
class GroupVarintEncoder {
public:
typedef GroupVarint<T> Base;
typedef T type;
explicit GroupVarintEncoder(Output out)
: out_(out),
count_(0) {
}
~GroupVarintEncoder() {
finish();
}
/**
* Add a value to the encoder.
*/
void add(type val) {
buf_[count_++] = val;
if (count_ == Base::kGroupSize) {
char* p = Base::encode(tmp_, buf_);
out_(StringPiece(tmp_, p));
count_ = 0;
}
}
/**
* Finish encoding, flushing any buffered values if necessary.
* After finish(), the encoder is immediately ready to encode more data
* to the same output.
*/
void finish() {
if (count_) {
// This is not strictly necessary, but it makes testing easy;
// uninitialized bytes are guaranteed to be recorded as taking one byte
// (not more).
for (size_t i = count_; i < Base::kGroupSize; i++) {
buf_[i] = 0;
}
Base::encode(tmp_, buf_);
out_(StringPiece(tmp_, Base::partialSize(buf_, count_)));
count_ = 0;
}
}
/**
* Return the appender that was used.
*/
Output& output() {
return out_;
}
const Output& output() const {
return out_;
}
/**
* Reset the encoder, disregarding any state (except what was already
* flushed to the output, of course).
*/
void clear() {
count_ = 0;
}
private:
Output out_;
char tmp_[Base::kMaxSize];
type buf_[Base::kGroupSize];
size_t count_;
};
/**
* Simplify use of GroupVarint* for the case where the last group in the
* input may be incomplete (but the exact size of the input is known).
* Allows for extracting values one at a time.
*/
template <typename T>
class GroupVarintDecoder {
public:
typedef GroupVarint<T> Base;
typedef T type;
GroupVarintDecoder() = default;
explicit GroupVarintDecoder(StringPiece data,
size_t maxCount = (size_t)-1)
: rrest_(data.end()),
p_(data.data()),
end_(data.end()),
limit_(end_),
pos_(0),
count_(0),
remaining_(maxCount) {
}
void reset(StringPiece data, size_t maxCount = (size_t)-1) {
rrest_ = data.end();
p_ = data.data();
end_ = data.end();
limit_ = end_;
pos_ = 0;
count_ = 0;
remaining_ = maxCount;
}
/**
* Read and return the next value.
*/
bool next(type* val) {
if (pos_ == count_) {
// refill
size_t rem = end_ - p_;
if (rem == 0 || remaining_ == 0) {
return false;
}
// next() attempts to read one full group at a time, and so we must have
// at least enough bytes readable after its end to handle the case if the
// last group is full.
//
// The best way to ensure this is to ensure that data has at least
// Base::kMaxSize - 1 bytes readable *after* the end, otherwise we'll copy
// into a temporary buffer.
if (limit_ - p_ < Base::kMaxSize) {
memcpy(tmp_, p_, rem);
p_ = tmp_;
end_ = p_ + rem;
limit_ = tmp_ + sizeof(tmp_);
}
pos_ = 0;
const char* n = Base::decode(p_, buf_);
if (n <= end_) {
// Full group could be decoded
if (remaining_ >= Base::kGroupSize) {
remaining_ -= Base::kGroupSize;
count_ = Base::kGroupSize;
p_ = n;
} else {
count_ = remaining_;
remaining_ = 0;
p_ += Base::partialSize(buf_, count_);
}
} else {
// Can't decode a full group
count_ = Base::partialCount(p_, end_ - p_);
if (remaining_ >= count_) {
remaining_ -= count_;
p_ = end_;
} else {
count_ = remaining_;
remaining_ = 0;
p_ += Base::partialSize(buf_, count_);
}
if (count_ == 0) {
return false;
}
}
}
*val = buf_[pos_++];
return true;
}
StringPiece rest() const {
// This is only valid after next() returned false
CHECK(pos_ == count_ && (p_ == end_ || remaining_ == 0));
// p_ may point to the internal buffer (tmp_), but we want
// to return subpiece of the original data
size_t size = end_ - p_;
return StringPiece(rrest_ - size, rrest_);
}
private:
const char* rrest_;
const char* p_;
const char* end_;
const char* limit_;
char tmp_[2 * Base::kMaxSize];
type buf_[Base::kGroupSize];
size_t pos_;
size_t count_;
size_t remaining_;
};
typedef GroupVarintDecoder<uint32_t> GroupVarint32Decoder;
typedef GroupVarintDecoder<uint64_t> GroupVarint64Decoder;
} // namespace folly
#endif /* FOLLY_X64 || defined(__i386__) || FOLLY_PPC64 || FOLLY_A64 */
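A minimal sketch of single-group and streaming encode/decode as documented above, assuming the vendored folly/GroupVarint.h on a supported (x86/ARM64/PPC64) little-endian target:

#include <cassert>
#include <cstdint>
#include <string>
#include <folly/GroupVarint.h>

int main() {
  // One full group: four 32-bit values, 1 header byte + 1-4 bytes per value.
  // encode() needs size()+4 bytes of room because it writes whole 32-bit words.
  uint32_t in[4] = {1, 300, 70000, 0x12345678};
  char buf[folly::GroupVarint32::kMaxSize + 4] = {};
  char* end = folly::GroupVarint32::encode(buf, in);
  assert(size_t(end - buf) == folly::GroupVarint32::size(in));

  uint32_t out[4];
  folly::GroupVarint32::decode_simple(buf, out);
  for (int i = 0; i < 4; ++i) {
    assert(in[i] == out[i]);
  }

  // Streaming: the encoder buffers values and flushes encoded groups through
  // an output functor taking a StringPiece; the decoder hands values back one
  // at a time and copes with a trailing partial group.
  std::string encoded;
  auto sink = [&](folly::StringPiece sp) { encoded.append(sp.data(), sp.size()); };
  folly::GroupVarintEncoder<uint32_t, decltype(sink)> enc(sink);
  for (uint32_t v = 0; v < 6; ++v) {  // 6 values -> one full + one partial group
    enc.add(v * 1000);
  }
  enc.finish();

  folly::GroupVarint32Decoder dec{folly::StringPiece(encoded)};
  uint32_t v = 0;
  size_t n = 0;
  while (dec.next(&v)) {
    assert(v == n * 1000);
    ++n;
  }
  assert(n == 6);
  return 0;
}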

461
ios/Pods/Folly/folly/Hash.h generated Normal file
View File

@ -0,0 +1,461 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint>
#include <cstring>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <folly/ApplyTuple.h>
#include <folly/Bits.h>
#include <folly/SpookyHashV1.h>
#include <folly/SpookyHashV2.h>
/*
* Various hashing functions.
*/
namespace folly { namespace hash {
// This is a general-purpose way to create a single hash from multiple
// hashable objects. hash_combine_generic takes a class Hasher implementing
// hash<T>; hash_combine uses a default hasher StdHasher that uses std::hash.
// hash_combine_generic hashes each argument and combines those hashes in
// an order-dependent way to yield a new hash.
// This is the Hash128to64 function from Google's cityhash (available
// under the MIT License). We use it to reduce multiple 64 bit hashes
// into a single hash.
inline uint64_t hash_128_to_64(const uint64_t upper, const uint64_t lower) {
// Murmur-inspired hashing.
const uint64_t kMul = 0x9ddfea08eb382d69ULL;
uint64_t a = (lower ^ upper) * kMul;
a ^= (a >> 47);
uint64_t b = (upper ^ a) * kMul;
b ^= (b >> 47);
b *= kMul;
return b;
}
// Never used, but gcc demands it.
template <class Hasher>
inline size_t hash_combine_generic() {
return 0;
}
template <
class Iter,
class Hash = std::hash<typename std::iterator_traits<Iter>::value_type>>
uint64_t hash_range(Iter begin,
Iter end,
uint64_t hash = 0,
Hash hasher = Hash()) {
for (; begin != end; ++begin) {
hash = hash_128_to_64(hash, hasher(*begin));
}
return hash;
}
inline uint32_t twang_32from64(uint64_t key);
template <class Hasher, typename T, typename... Ts>
size_t hash_combine_generic(const T& t, const Ts&... ts) {
size_t seed = Hasher::hash(t);
if (sizeof...(ts) == 0) {
return seed;
}
size_t remainder = hash_combine_generic<Hasher>(ts...);
/* static */ if (sizeof(size_t) == sizeof(uint32_t)) {
return twang_32from64((uint64_t(seed) << 32) | remainder);
} else {
return static_cast<size_t>(hash_128_to_64(seed, remainder));
}
}
// Simply uses std::hash to hash. Note that std::hash is not guaranteed
// to be a very good hash function; provided std::hash doesn't collide on
// the individual inputs, you are fine, but that won't be true for, say,
// strings or pairs
class StdHasher {
public:
template <typename T>
static size_t hash(const T& t) {
return std::hash<T>()(t);
}
};
template <typename T, typename... Ts>
size_t hash_combine(const T& t, const Ts&... ts) {
return hash_combine_generic<StdHasher>(t, ts...);
}
//////////////////////////////////////////////////////////////////////
/*
* Thomas Wang 64 bit mix hash function
*/
inline uint64_t twang_mix64(uint64_t key) {
key = (~key) + (key << 21); // key *= (1 << 21) - 1; key -= 1;
key = key ^ (key >> 24);
key = key + (key << 3) + (key << 8); // key *= 1 + (1 << 3) + (1 << 8)
key = key ^ (key >> 14);
key = key + (key << 2) + (key << 4); // key *= 1 + (1 << 2) + (1 << 4)
key = key ^ (key >> 28);
key = key + (key << 31); // key *= 1 + (1 << 31)
return key;
}
/*
* Inverse of twang_mix64
*
* Note that twang_unmix64 is significantly slower than twang_mix64.
*/
inline uint64_t twang_unmix64(uint64_t key) {
// See the comments in jenkins_rev_unmix32 for an explanation as to how this
// was generated
key *= 4611686016279904257U;
key ^= (key >> 28) ^ (key >> 56);
key *= 14933078535860113213U;
key ^= (key >> 14) ^ (key >> 28) ^ (key >> 42) ^ (key >> 56);
key *= 15244667743933553977U;
key ^= (key >> 24) ^ (key >> 48);
key = (key + 1) * 9223367638806167551U;
return key;
}
/*
* Thomas Wang downscaling hash function
*/
inline uint32_t twang_32from64(uint64_t key) {
key = (~key) + (key << 18);
key = key ^ (key >> 31);
key = key * 21;
key = key ^ (key >> 11);
key = key + (key << 6);
key = key ^ (key >> 22);
return (uint32_t) key;
}
/*
* Robert Jenkins' reversible 32 bit mix hash function
*/
inline uint32_t jenkins_rev_mix32(uint32_t key) {
key += (key << 12); // key *= (1 + (1 << 12))
key ^= (key >> 22);
key += (key << 4); // key *= (1 + (1 << 4))
key ^= (key >> 9);
key += (key << 10); // key *= (1 + (1 << 10))
key ^= (key >> 2);
// key *= (1 + (1 << 7)) * (1 + (1 << 12))
key += (key << 7);
key += (key << 12);
return key;
}
/*
* Inverse of jenkins_rev_mix32
*
* Note that jenkins_rev_unmix32 is significantly slower than
* jenkins_rev_mix32.
*/
inline uint32_t jenkins_rev_unmix32(uint32_t key) {
// These are the modular multiplicative inverses (in Z_2^32) of the
// multiplication factors in jenkins_rev_mix32, in reverse order. They were
// computed using the Extended Euclidean algorithm, see
// http://en.wikipedia.org/wiki/Modular_multiplicative_inverse
key *= 2364026753U;
// The inverse of a ^= (a >> n) is
// b = a
// for (int i = n; i < 32; i += n) {
// b ^= (a >> i);
// }
key ^=
(key >> 2) ^ (key >> 4) ^ (key >> 6) ^ (key >> 8) ^
(key >> 10) ^ (key >> 12) ^ (key >> 14) ^ (key >> 16) ^
(key >> 18) ^ (key >> 20) ^ (key >> 22) ^ (key >> 24) ^
(key >> 26) ^ (key >> 28) ^ (key >> 30);
key *= 3222273025U;
key ^= (key >> 9) ^ (key >> 18) ^ (key >> 27);
key *= 4042322161U;
key ^= (key >> 22);
key *= 16773121U;
return key;
}
/*
* Fowler / Noll / Vo (FNV) Hash
* http://www.isthe.com/chongo/tech/comp/fnv/
*/
const uint32_t FNV_32_HASH_START = 2166136261UL;
const uint64_t FNV_64_HASH_START = 14695981039346656037ULL;
inline uint32_t fnv32(const char* s,
uint32_t hash = FNV_32_HASH_START) {
for (; *s; ++s) {
hash += (hash << 1) + (hash << 4) + (hash << 7) +
(hash << 8) + (hash << 24);
hash ^= *s;
}
return hash;
}
inline uint32_t fnv32_buf(const void* buf,
size_t n,
uint32_t hash = FNV_32_HASH_START) {
// forcing signed char, since other platforms can use unsigned
const signed char* char_buf = reinterpret_cast<const signed char*>(buf);
for (size_t i = 0; i < n; ++i) {
hash += (hash << 1) + (hash << 4) + (hash << 7) +
(hash << 8) + (hash << 24);
hash ^= char_buf[i];
}
return hash;
}
inline uint32_t fnv32(const std::string& str,
uint32_t hash = FNV_32_HASH_START) {
return fnv32_buf(str.data(), str.size(), hash);
}
inline uint64_t fnv64(const char* s,
uint64_t hash = FNV_64_HASH_START) {
for (; *s; ++s) {
hash += (hash << 1) + (hash << 4) + (hash << 5) + (hash << 7) +
(hash << 8) + (hash << 40);
hash ^= *s;
}
return hash;
}
inline uint64_t fnv64_buf(const void* buf,
size_t n,
uint64_t hash = FNV_64_HASH_START) {
// forcing signed char, since other platforms can use unsigned
const signed char* char_buf = reinterpret_cast<const signed char*>(buf);
for (size_t i = 0; i < n; ++i) {
hash += (hash << 1) + (hash << 4) + (hash << 5) + (hash << 7) +
(hash << 8) + (hash << 40);
hash ^= char_buf[i];
}
return hash;
}
inline uint64_t fnv64(const std::string& str,
uint64_t hash = FNV_64_HASH_START) {
return fnv64_buf(str.data(), str.size(), hash);
}
/*
* Paul Hsieh: http://www.azillionmonkeys.com/qed/hash.html
*/
#define get16bits(d) folly::loadUnaligned<uint16_t>(d)
inline uint32_t hsieh_hash32_buf(const void* buf, size_t len) {
// forcing signed char, since other platforms can use unsigned
const unsigned char* s = reinterpret_cast<const unsigned char*>(buf);
uint32_t hash = static_cast<uint32_t>(len);
uint32_t tmp;
size_t rem;
if (len <= 0 || buf == 0) {
return 0;
}
rem = len & 3;
len >>= 2;
/* Main loop */
for (;len > 0; len--) {
hash += get16bits (s);
tmp = (get16bits (s+2) << 11) ^ hash;
hash = (hash << 16) ^ tmp;
s += 2*sizeof (uint16_t);
hash += hash >> 11;
}
/* Handle end cases */
switch (rem) {
case 3:
hash += get16bits(s);
hash ^= hash << 16;
hash ^= s[sizeof (uint16_t)] << 18;
hash += hash >> 11;
break;
case 2:
hash += get16bits(s);
hash ^= hash << 11;
hash += hash >> 17;
break;
case 1:
hash += *s;
hash ^= hash << 10;
hash += hash >> 1;
}
/* Force "avalanching" of final 127 bits */
hash ^= hash << 3;
hash += hash >> 5;
hash ^= hash << 4;
hash += hash >> 17;
hash ^= hash << 25;
hash += hash >> 6;
return hash;
};
#undef get16bits
inline uint32_t hsieh_hash32(const char* s) {
return hsieh_hash32_buf(s, std::strlen(s));
}
inline uint32_t hsieh_hash32_str(const std::string& str) {
return hsieh_hash32_buf(str.data(), str.size());
}
//////////////////////////////////////////////////////////////////////
} // namespace hash
template<class Key, class Enable = void>
struct hasher;
struct Hash {
template <class T>
size_t operator()(const T& v) const {
return hasher<T>()(v);
}
template <class T, class... Ts>
size_t operator()(const T& t, const Ts&... ts) const {
return hash::hash_128_to_64((*this)(t), (*this)(ts...));
}
};
template<> struct hasher<int32_t> {
size_t operator()(int32_t key) const {
return hash::jenkins_rev_mix32(uint32_t(key));
}
};
template<> struct hasher<uint32_t> {
size_t operator()(uint32_t key) const {
return hash::jenkins_rev_mix32(key);
}
};
template<> struct hasher<int64_t> {
size_t operator()(int64_t key) const {
return static_cast<size_t>(hash::twang_mix64(uint64_t(key)));
}
};
template<> struct hasher<uint64_t> {
size_t operator()(uint64_t key) const {
return static_cast<size_t>(hash::twang_mix64(key));
}
};
template<> struct hasher<std::string> {
size_t operator()(const std::string& key) const {
return static_cast<size_t>(
hash::SpookyHashV2::Hash64(key.data(), key.size(), 0));
}
};
template <class T>
struct hasher<T, typename std::enable_if<std::is_enum<T>::value, void>::type> {
size_t operator()(T key) const {
return Hash()(static_cast<typename std::underlying_type<T>::type>(key));
}
};
template <class T1, class T2>
struct hasher<std::pair<T1, T2>> {
size_t operator()(const std::pair<T1, T2>& key) const {
return Hash()(key.first, key.second);
}
};
template <typename... Ts>
struct hasher<std::tuple<Ts...>> {
size_t operator() (const std::tuple<Ts...>& key) const {
return applyTuple(Hash(), key);
}
};
// recursion
template <size_t index, typename... Ts>
struct TupleHasher {
size_t operator()(std::tuple<Ts...> const& key) const {
return hash::hash_combine(
TupleHasher<index - 1, Ts...>()(key),
std::get<index>(key));
}
};
// base
template <typename... Ts>
struct TupleHasher<0, Ts...> {
size_t operator()(std::tuple<Ts...> const& key) const {
// we could do std::hash here directly, but hash_combine hides all the
// ugly templating implicitly
return hash::hash_combine(std::get<0>(key));
}
};
} // namespace folly
// Custom hash functions.
namespace std {
// Hash function for pairs. Requires default hash functions for both
// items in the pair.
template <typename T1, typename T2>
struct hash<std::pair<T1, T2> > {
public:
size_t operator()(const std::pair<T1, T2>& x) const {
return folly::hash::hash_combine(x.first, x.second);
}
};
// Hash function for tuples. Requires default hash functions for all types.
template <typename... Ts>
struct hash<std::tuple<Ts...>> {
size_t operator()(std::tuple<Ts...> const& key) const {
folly::TupleHasher<
std::tuple_size<std::tuple<Ts...>>::value - 1, // start index
Ts...> hasher;
return hasher(key);
}
};
} // namespace std
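A minimal sketch of the hash utilities above, assuming the vendored folly/Hash.h: hash_combine folds several values into one order-dependent hash, folly::Hash covers pairs via the hasher<> specializations, and twang_unmix64 exactly inverts twang_mix64:

#include <cassert>
#include <cstdint>
#include <iostream>
#include <string>
#include <utility>
#include <folly/Hash.h>

int main() {
  // One order-dependent hash from heterogeneous values (each hashed by StdHasher).
  size_t combined = folly::hash::hash_combine(std::string("user"), 42, 3.5);
  std::cout << combined << "\n";

  // folly::Hash dispatches to hasher<> specializations, including pairs.
  std::pair<int64_t, std::string> key{7, "seven"};
  std::cout << folly::Hash()(key) << "\n";

  // twang_unmix64 is the exact inverse of twang_mix64.
  uint64_t k = 0xdeadbeefcafef00dULL;
  assert(folly::hash::twang_unmix64(folly::hash::twang_mix64(k)) == k);

  // FNV-style hash of a C string.
  std::cout << folly::hash::fnv64("hello") << "\n";
  return 0;
}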

463
ios/Pods/Folly/folly/IPAddress.h generated Normal file
View File

@ -0,0 +1,463 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <functional>
#include <iosfwd>
#include <memory>
#include <string>
#include <utility> // std::pair
#include <folly/Range.h>
#include <folly/IPAddressException.h>
#include <folly/IPAddressV4.h>
#include <folly/IPAddressV6.h>
#include <folly/detail/IPAddress.h>
namespace folly {
class IPAddress;
/**
* Pair of IPAddress, netmask
*/
typedef std::pair<IPAddress, uint8_t> CIDRNetwork;
/**
* Provides a unified interface for IP addresses.
*
* @note If you compare 2 IPAddress instances, v4-to-v6-mapped addresses are
* compared as V4 addresses.
*
* @note toLong/fromLong deal in network byte order, use toLongHBO/fromLongHBO
* if working in host byte order.
*
* Example usage:
* @code
* IPAddress v4addr("192.0.2.129");
* IPAddress v6map("::ffff:192.0.2.129");
* CHECK(v4addr.inSubnet("192.0.2.0/24") ==
* v4addr.inSubnet(IPAddress("192.0.2.0"), 24));
* CHECK(v4addr.inSubnet("192.0.2.128/30"));
* CHECK(!v4addr.inSubnet("192.0.2.128/32"));
* CHECK(v4addr.asV4().toLong() == 2164392128);
* CHECK(v4addr.asV4().toLongHBO() == 3221226113);
* CHECK(v4addr.isV4());
* CHECK(v6map.isV6());
* CHECK(v4addr == v6map);
* CHECK(v6map.isIPv4Mapped());
* CHECK(v4addr.asV4() == IPAddress::createIPv4(v6map));
* CHECK(IPAddress::createIPv6(v4addr) == v6map.asV6());
* @endcode
*/
class IPAddress {
public:
// returns true iff the input string can be parsed as an ip-address
static bool validate(StringPiece ip);
// return the V4 representation of the address, converting it from V6 to V4 if
// needed. Note that this will throw an IPAddressFormatException if the V6
// address is not IPv4Mapped.
static IPAddressV4 createIPv4(const IPAddress& addr);
// return the V6 representation of the address, converting it from V4 to V6 if
// needed.
static IPAddressV6 createIPv6(const IPAddress& addr);
/**
* Create a network and mask from a CIDR formatted address string.
* @param [in] ipSlashCidr IP/CIDR formatted string to split
* @param [in] defaultCidr default value if no /N specified (if defaultCidr
* is -1, will use /32 for IPv4 and /128 for IPv6)
* @param [in] mask apply mask on the address or not,
* e.g. 192.168.13.46/24 => 192.168.13.0/24
* @throws IPAddressFormatException if invalid address
* @return pair with IPAddress network and uint8_t mask
*/
static CIDRNetwork createNetwork(
StringPiece ipSlashCidr, int defaultCidr = -1, bool mask = true);
/**
* Return a string representation of a CIDR block created with createNetwork.
* @param [in] network, pair of address and cidr
*
* @return string representing the netblock
*/
static std::string networkToString(const CIDRNetwork& network);
/**
* Create a new IPAddress instance from the provided binary data
* in network byte order.
* @throws IPAddressFormatException if len is not 4 or 16
*/
static IPAddress fromBinary(ByteRange bytes);
/**
* Create an IPAddress from a 32bit long (network byte order).
* @throws IPAddressFormatException
*/
static IPAddress fromLong(uint32_t src);
// Same as above, but host byte order
static IPAddress fromLongHBO(uint32_t src);
// Given 2 IPAddress,mask pairs extract the longest common IPAddress,
// mask pair
static CIDRNetwork longestCommonPrefix(const CIDRNetwork& one,
const CIDRNetwork& two);
/**
* Constructs an uninitialized IPAddress.
*/
IPAddress();
/**
* Parse an IPAddress from a string representation.
*
* Formats accepted are exactly the same as the ones accepted by inet_pton(),
* using AF_INET6 if the string contains colons, and AF_INET otherwise;
* with the exception that the whole address can optionally be enclosed
* in square brackets.
*
* @throws IPAddressFormatException
*/
explicit IPAddress(StringPiece ip);
/**
* Create an IPAddress from a sockaddr.
* @throws IPAddressFormatException if nullptr or not AF_INET or AF_INET6
*/
explicit IPAddress(const sockaddr* addr);
// Create an IPAddress from a V4 address
/* implicit */ IPAddress(const IPAddressV4 ipV4Addr);
/* implicit */ IPAddress(const in_addr addr);
// Create an IPAddress from a V6 address
/* implicit */ IPAddress(const IPAddressV6& ipV6Addr);
/* implicit */ IPAddress(const in6_addr& addr);
// Assign from V4 address
IPAddress& operator=(const IPAddressV4& ipV4Addr);
// Assign from V6 address
IPAddress& operator=(const IPAddressV6& ipV6Addr);
/**
* Converts an IPAddress to an IPAddressV4 instance.
* @note This is not some handy convenience wrapper to convert an IPv4 address
* to a mapped IPv6 address. If you want that use
* IPAddress::createIPv6(addr)
* @throws IPAddressFormatException if this is not a V4 instance
*/
const IPAddressV4& asV4() const {
if (UNLIKELY(!isV4())) {
asV4Throw();
}
return addr_.ipV4Addr;
}
/**
* Converts an IPAddress to an IPAddressV6 instance.
* @throws InvalidAddressFamilyException if this is not a V6 instance
*/
const IPAddressV6& asV6() const {
if (UNLIKELY(!isV6())) {
asV6Throw();
}
return addr_.ipV6Addr;
}
// Return sa_family_t of IPAddress
sa_family_t family() const { return family_; }
// Populate sockaddr_storage with an appropriate value
int toSockaddrStorage(sockaddr_storage *dest, uint16_t port = 0) const {
if (dest == nullptr) {
throw IPAddressFormatException("dest must not be null");
}
memset(dest, 0, sizeof(sockaddr_storage));
dest->ss_family = family();
if (isV4()) {
sockaddr_in *sin = reinterpret_cast<sockaddr_in*>(dest);
sin->sin_addr = asV4().toAddr();
sin->sin_port = port;
#if defined(__APPLE__)
sin->sin_len = sizeof(*sin);
#endif
return sizeof(*sin);
} else if (isV6()) {
sockaddr_in6 *sin = reinterpret_cast<sockaddr_in6*>(dest);
sin->sin6_addr = asV6().toAddr();
sin->sin6_port = port;
sin->sin6_scope_id = asV6().getScopeId();
#if defined(__APPLE__)
sin->sin6_len = sizeof(*sin);
#endif
return sizeof(*sin);
} else {
throw InvalidAddressFamilyException(family());
}
}
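/**
 * Usage sketch: the port is copied verbatim into sin_port/sin6_port above,
 * so callers should pass it already in network byte order.
 * @code
 *   IPAddress addr("192.0.2.129");
 *   sockaddr_storage storage;
 *   int len = addr.toSockaddrStorage(&storage, htons(8080));
 *   // reinterpret_cast<sockaddr*>(&storage) together with len can now be
 *   // passed to bind()/connect(); len == sizeof(sockaddr_in) since addr is V4.
 * @endcode
 */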
/**
* Check if the address is found in the specified CIDR netblock.
*
* This will return false if the specified cidrNet is V4, but the address is
* V6. It will also return false if the specified cidrNet is V6 but the
* address is V4. This method will do the right thing in the case of a v6
* mapped v4 address.
*
* @note This is slower than the below counterparts. If perf is important use
* one of the two argument variations below.
* @param [in] ipSlashCidr address in "192.168.1.0/24" format
* @throws IPAddressFormatException if no /mask
* @return true if address is part of specified subnet with cidr
*/
bool inSubnet(StringPiece ipSlashCidr) const;
/**
* Check if an IPAddress belongs to a subnet.
* @param [in] subnet Subnet to check against (e.g. 192.168.1.0)
* @param [in] cidr CIDR for subnet (e.g. 24 for /24)
* @return true if address is part of specified subnet with cidr
*/
bool inSubnet(const IPAddress& subnet, uint8_t cidr) const;
/**
* Check if an IPAddress belongs to the subnet with the given mask.
* This is the same as inSubnet but the mask is provided instead of looked up
* from the cidr.
* @param [in] subnet Subnet to check against
* @param [in] mask The netmask for the subnet
* @return true if address is part of the specified subnet with mask
*/
bool inSubnetWithMask(const IPAddress& subnet, ByteRange mask) const;
// @return true if address is a v4 mapped address
bool isIPv4Mapped() const {
return isV6() && asV6().isIPv4Mapped();
}
// @return true if address is uninitialized
bool empty() const { return (family_ == AF_UNSPEC); }
// @return true if address is initialized
explicit operator bool() const { return !empty(); }
// @return true if this is an IPAddressV4 instance
bool isV4() const { return (family_ == AF_INET); }
// @return true if this is an IPAddressV6 instance
bool isV6() const { return (family_ == AF_INET6); }
// @return true if this address is all zeros
bool isZero() const {
return isV4() ? asV4().isZero()
: asV6().isZero();
}
// Number of bits in the address representation.
size_t bitCount() const {
return isV4() ? IPAddressV4::bitCount()
: IPAddressV6::bitCount();
}
// Number of bytes in the address representation.
size_t byteCount() const {
return bitCount() / 8;
}
//get nth most significant bit - 0 indexed
bool getNthMSBit(size_t bitIndex) const {
return detail::getNthMSBitImpl(*this, bitIndex, family());
}
//get nth most significant byte - 0 indexed
uint8_t getNthMSByte(size_t byteIndex) const;
//get nth bit - 0 indexed
bool getNthLSBit(size_t bitIndex) const {
return getNthMSBit(bitCount() - bitIndex - 1);
}
//get nth byte - 0 indexed
uint8_t getNthLSByte(size_t byteIndex) const {
return getNthMSByte(byteCount() - byteIndex - 1);
}
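// Worked example for the accessors above (bytes are in network byte order):
// for IPAddress("192.0.2.129"), getNthMSByte(0) == 192, getNthLSByte(0) == 129,
// and getNthMSBit(0) == 1 (the top bit of 0xC0).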
/**
* Get human-readable string representation of the address.
*
* This prints a string representation of the address, for human consumption
* or logging. The string will take the form of a JSON object that looks like:
* {family:'AF_INET|AF_INET6', addr:'address', hash:long}.
*/
std::string toJson() const {
return isV4() ? asV4().toJson()
: asV6().toJson();
}
// Hash of address
std::size_t hash() const {
return isV4() ? asV4().hash()
: asV6().hash();
}
// Return true if the address qualifies as localhost.
bool isLoopback() const {
return isV4() ? asV4().isLoopback()
: asV6().isLoopback();
}
// Return true if the address qualifies as link local
bool isLinkLocal() const {
return isV4() ? asV4().isLinkLocal()
: asV6().isLinkLocal();
}
// Return true if the address qualifies as broadcast.
bool isLinkLocalBroadcast() const {
return isV4() ? asV4().isLinkLocalBroadcast()
: asV6().isLinkLocalBroadcast();
}
/**
* Return true if the address is a special purpose address, as per rfc6890
* (i.e. 0.0.0.0).
* For V6, true if the address is not in one of global scope blocks:
* 2000::/3, ffxe::/16.
*/
bool isNonroutable() const {
return isV4() ? asV4().isNonroutable()
: asV6().isNonroutable();
}
/**
* Return true if the address is private, as per rfc1918 and rfc4193
* (for example, 192.168.xxx.xxx or fc00::/7 addresses)
*/
bool isPrivate() const {
return isV4() ? asV4().isPrivate()
: asV6().isPrivate();
}
// Return true if the address is a multicast address.
bool isMulticast() const {
return isV4() ? asV4().isMulticast()
: asV6().isMulticast();
}
/**
* Creates IPAddress instance with all but most significant numBits set to 0.
* @param [in] numBits number of bits to mask
* @throws abort if numBits > bitCount()
* @return IPAddress instance with bits set to 0
*/
IPAddress mask(uint8_t numBits) const {
return isV4() ? IPAddress(asV4().mask(numBits))
: IPAddress(asV6().mask(numBits));
}
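// Example: IPAddress("192.0.2.129").mask(24) keeps the 24 most significant
// bits and clears the rest, yielding IPAddress("192.0.2.0").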
/**
* Provides a string representation of address.
* @note The string representation is calculated on demand.
* @throws IPAddressFormatException on inet_ntop error
*/
std::string str() const {
return isV4() ? asV4().str()
: asV6().str();
}
/**
* Return the fully qualified string representation of the address.
* For V4 addresses this is the same as calling str(). For V6 addresses
* this is the hex representation with : characters inserted every 4 digits.
*/
std::string toFullyQualified() const {
return isV4() ? asV4().toFullyQualified()
: asV6().toFullyQualified();
}
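// Example: for IPAddress("2001:db8::1") this expands every group and returns
// "2001:0db8:0000:0000:0000:0000:0000:0001", whereas str() keeps the
// compressed "2001:db8::1" form.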
// Address version (4 or 6)
uint8_t version() const {
return isV4() ? asV4().version()
: asV6().version();
}
/**
* Access to address bytes, in network byte order.
*/
const unsigned char* bytes() const {
return isV4() ? asV4().bytes() : asV6().bytes();
}
private:
[[noreturn]] void asV4Throw() const;
[[noreturn]] void asV6Throw() const;
typedef union IPAddressV46 {
IPAddressV4 ipV4Addr;
IPAddressV6 ipV6Addr;
// default constructor
IPAddressV46() {
std::memset(this, 0, sizeof(IPAddressV46));
}
explicit IPAddressV46(const IPAddressV4& addr): ipV4Addr(addr) {}
explicit IPAddressV46(const IPAddressV6& addr): ipV6Addr(addr) {}
} IPAddressV46;
IPAddressV46 addr_;
sa_family_t family_;
};
// boost::hash uses hash_value() so this allows boost::hash to work
// automatically for IPAddress
std::size_t hash_value(const IPAddress& addr);
std::ostream& operator<<(std::ostream& os, const IPAddress& addr);
// Define toAppend() to allow IPAddress to be used with folly::to<string>
void toAppend(IPAddress addr, std::string* result);
void toAppend(IPAddress addr, fbstring* result);
/**
* Return true if two addresses are equal.
*
* @note This takes into consideration V4 mapped addresses as well. If one
* address is v4 mapped we compare the v4 addresses.
*
* @return true if the two addresses are equal.
*/
bool operator==(const IPAddress& addr1, const IPAddress& addr2);
// Return true if addr1 < addr2
bool operator<(const IPAddress& addr1, const IPAddress& addr2);
// Derived operators
inline bool operator!=(const IPAddress& a, const IPAddress& b) {
return !(a == b);
}
inline bool operator>(const IPAddress& a, const IPAddress& b) {
return b < a;
}
inline bool operator<=(const IPAddress& a, const IPAddress& b) {
return !(a > b);
}
inline bool operator>=(const IPAddress& a, const IPAddress& b) {
return !(a < b);
}
} // folly
namespace std {
template<>
struct hash<folly::IPAddress> {
size_t operator()(const folly::IPAddress& addr) const {
return addr.hash();
}
};
} // std

View File

@ -0,0 +1,65 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <exception>
#include <string>
#include <utility>
#include <folly/detail/IPAddress.h>
namespace folly {
/**
* Exception for invalid IP addresses.
*/
class IPAddressFormatException : public std::exception {
public:
explicit IPAddressFormatException(std::string msg) noexcept
: msg_(std::move(msg)) {}
IPAddressFormatException(const IPAddressFormatException&) = default;
IPAddressFormatException(IPAddressFormatException&&) = default;
IPAddressFormatException& operator=(const IPAddressFormatException&) =
default;
IPAddressFormatException& operator=(IPAddressFormatException&&) = default;
virtual ~IPAddressFormatException() noexcept {}
virtual const char *what(void) const noexcept {
return msg_.c_str();
}
private:
std::string msg_;
};
class InvalidAddressFamilyException : public IPAddressFormatException {
public:
explicit InvalidAddressFamilyException(std::string msg) noexcept
: IPAddressFormatException(std::move(msg)) {}
explicit InvalidAddressFamilyException(sa_family_t family) noexcept
: InvalidAddressFamilyException(
"Address family " + detail::familyNameStr(family) +
" is not AF_INET or AF_INET6") {}
InvalidAddressFamilyException(const InvalidAddressFamilyException&) = default;
InvalidAddressFamilyException(InvalidAddressFamilyException&&) = default;
InvalidAddressFamilyException& operator=(
const InvalidAddressFamilyException&) = default;
InvalidAddressFamilyException& operator=(InvalidAddressFamilyException&&) =
default;
};
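/**
 * Usage sketch for the exceptions above (the literal string is only
 * illustrative):
 * @code
 *   try {
 *     folly::IPAddress addr("not-an-ip");
 *   } catch (const folly::IPAddressFormatException& ex) {
 *     // "not-an-ip" cannot be parsed, so construction throws and ex.what()
 *     // describes the failure.
 *   }
 * @endcode
 * InvalidAddressFamilyException derives from IPAddressFormatException, so a
 * handler for the base class also catches family mismatches such as calling
 * asV6() on a V4 address.
 */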
} // folly

306
ios/Pods/Folly/folly/IPAddressV4.h generated Normal file
View File

@ -0,0 +1,306 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstring>
#include <array>
#include <functional>
#include <iosfwd>
#include <folly/Hash.h>
#include <folly/Range.h>
#include <folly/detail/IPAddress.h>
namespace folly {
class IPAddress;
class IPAddressV4;
class IPAddressV6;
/**
* Pair of IPAddressV4, netmask
*/
typedef std::pair<IPAddressV4, uint8_t> CIDRNetworkV4;
/**
* Specialization for IPv4 addresses
*/
typedef std::array<uint8_t, 4> ByteArray4;
/**
* IPv4 variation of IPAddress.
*
* Added methods: toLong, toLongHBO and createIPv6
*
* @note toLong/fromLong deal in network byte order, use toLongHBO/fromLongHBO
* if working in host byte order.
*
* @see IPAddress
*/
class IPAddressV4 {
public:
// returns true iff the input string can be parsed as an ipv4-address
static bool validate(StringPiece ip);
// create an IPAddressV4 instance from a uint32_t (network byte order)
static IPAddressV4 fromLong(uint32_t src);
// same as above but host byte order
static IPAddressV4 fromLongHBO(uint32_t src);
/**
* Create a new IPAddress instance from the provided binary data.
* @throws IPAddressFormatException if the input length is not 4 bytes.
*/
static IPAddressV4 fromBinary(ByteRange bytes) {
IPAddressV4 addr;
addr.setFromBinary(bytes);
return addr;
}
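/**
 * Sketch of building an address from raw network-order bytes (the byte
 * values are arbitrary):
 * @code
 *   ByteArray4 raw{{192, 0, 2, 129}};
 *   auto addr = IPAddressV4::fromBinary(ByteRange(raw.data(), raw.size()));
 *   // addr.str() == "192.0.2.129"; a range whose length is not 4 makes
 *   // fromBinary() throw IPAddressFormatException.
 * @endcode
 */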
/**
* Returns the address as a Range.
*/
ByteRange toBinary() const {
return ByteRange((const unsigned char *) &addr_.inAddr_.s_addr, 4);
}
/**
* Convert a IPv4 address string to a long in network byte order.
* @param [in] ip the address to convert
* @return the long representation of the address
*/
static uint32_t toLong(StringPiece ip);
// Same as above, but in host byte order.
// This is slightly slower than toLong.
static uint32_t toLongHBO(StringPiece ip);
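/**
 * Worked example; the value of toLong() below assumes a little-endian host,
 * since it returns the raw network-order word:
 * @code
 *   IPAddressV4::toLongHBO("192.0.2.129") == 3221226113   // 0xC0000281
 *   IPAddressV4::toLong("192.0.2.129")    == 2164392128   // 0x810200C0 (LE)
 * @endcode
 */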
/**
* Default constructor for IPAddressV4.
*
* The address value will be 0.0.0.0
*/
IPAddressV4();
// Create an IPAddressV4 from a string
// @throws IPAddressFormatException
explicit IPAddressV4(StringPiece ip);
// ByteArray4 constructor
explicit IPAddressV4(const ByteArray4& src);
// in_addr constructor
explicit IPAddressV4(const in_addr src);
// Return the V6 mapped representation of the address.
IPAddressV6 createIPv6() const;
/**
* Return a V6 address in the format of a 6To4 address.
*/
IPAddressV6 getIPv6For6To4() const;
// Return the long (network byte order) representation of the address.
uint32_t toLong() const {
return toAddr().s_addr;
}
// Return the long (host byte order) representation of the address.
// This is slightly slower than toLong.
uint32_t toLongHBO() const {
return ntohl(toLong());
}
/**
* @see IPAddress#bitCount
* @returns 32
*/
static size_t bitCount() { return 32; }
/**
* @See IPAddress#toJson
*/
std::string toJson() const;
size_t hash() const {
static const uint32_t seed = AF_INET;
uint32_t hashed = hash::fnv32_buf(&addr_, 4);
return hash::hash_combine(seed, hashed);
}
// @see IPAddress#inSubnet
// @throws IPAddressFormatException if string doesn't contain a V4 address
bool inSubnet(StringPiece cidrNetwork) const;
// return true if address is in subnet
bool inSubnet(const IPAddressV4& subnet, uint8_t cidr) const {
return inSubnetWithMask(subnet, fetchMask(cidr));
}
bool inSubnetWithMask(const IPAddressV4& subnet, const ByteArray4 mask) const;
// @see IPAddress#isLoopback
bool isLoopback() const;
// @see IPAddress#isLinkLocal
bool isLinkLocal() const;
// @see IPAddress#isNonroutable
bool isNonroutable() const;
// @see IPAddress#isPrivate
bool isPrivate() const;
// @see IPAddress#isMulticast
bool isMulticast() const;
// @see IPAddress#isZero
bool isZero() const {
constexpr auto zero = ByteArray4{{}};
return 0 == std::memcmp(bytes(), zero.data(), zero.size());
}
bool isLinkLocalBroadcast() const {
return (INADDR_BROADCAST == toLongHBO());
}
// @see IPAddress#mask
IPAddressV4 mask(size_t numBits) const;
// @see IPAddress#str
std::string str() const;
// return underlying in_addr structure
in_addr toAddr() const { return addr_.inAddr_; }
sockaddr_in toSockAddr() const {
sockaddr_in addr;
memset(&addr, 0, sizeof(sockaddr_in));
addr.sin_family = AF_INET;
memcpy(&addr.sin_addr, &addr_.inAddr_, sizeof(in_addr));
return addr;
}
ByteArray4 toByteArray() const {
ByteArray4 ba{{0}};
std::memcpy(ba.data(), bytes(), 4);
return ba;
}
// @see IPAddress#toFullyQualified
std::string toFullyQualified() const { return str(); }
// @see IPAddress#version
size_t version() const { return 4; }
/**
* Return the mask associated with the given number of bits.
* If for instance numBits was 24 (e.g. /24) then the V4 mask returned should
* be {0xff, 0xff, 0xff, 0x00}.
* @param [in] numBits bitmask to retrieve
* @throws abort if numBits == 0 or numBits > bitCount()
* @return mask associated with numBits
*/
static const ByteArray4 fetchMask(size_t numBits);
// Given 2 IPAddressV4, mask pairs extract the longest common IPAddress,
// mask pair
static CIDRNetworkV4 longestCommonPrefix(
const CIDRNetworkV4& one,
const CIDRNetworkV4& two);
// Number of bytes in the address representation.
static size_t byteCount() { return 4; }
//get nth most significant bit - 0 indexed
bool getNthMSBit(size_t bitIndex) const {
return detail::getNthMSBitImpl(*this, bitIndex, AF_INET);
}
//get nth most significant byte - 0 indexed
uint8_t getNthMSByte(size_t byteIndex) const;
//get nth bit - 0 indexed
bool getNthLSBit(size_t bitIndex) const {
return getNthMSBit(bitCount() - bitIndex - 1);
}
//get nth byte - 0 indexed
uint8_t getNthLSByte(size_t byteIndex) const {
return getNthMSByte(byteCount() - byteIndex - 1);
}
const unsigned char* bytes() const { return addr_.bytes_.data(); }
private:
union AddressStorage {
static_assert(sizeof(in_addr) == sizeof(ByteArray4),
"size of in_addr and ByteArray4 are different");
in_addr inAddr_;
ByteArray4 bytes_;
AddressStorage() {
std::memset(this, 0, sizeof(AddressStorage));
}
explicit AddressStorage(const ByteArray4 bytes): bytes_(bytes) {}
explicit AddressStorage(const in_addr addr): inAddr_(addr) {}
} addr_;
static const std::array<ByteArray4, 33> masks_;
/**
* Set the current IPAddressV4 object to have the address specified by bytes.
* @throws IPAddressFormatException if bytes.size() is not 4.
*/
void setFromBinary(ByteRange bytes);
};
// boost::hash uses hash_value() so this allows boost::hash to work
// automatically for IPAddressV4
size_t hash_value(const IPAddressV4& addr);
std::ostream& operator<<(std::ostream& os, const IPAddressV4& addr);
// Define toAppend() to allow IPAddressV4 to be used with to<string>
void toAppend(IPAddressV4 addr, std::string* result);
void toAppend(IPAddressV4 addr, fbstring* result);
/**
* Return true if two addresses are equal.
*/
inline bool operator==(const IPAddressV4& addr1, const IPAddressV4& addr2) {
return (addr1.toLong() == addr2.toLong());
}
// Return true if addr1 < addr2
inline bool operator<(const IPAddressV4& addr1, const IPAddressV4& addr2) {
return (addr1.toLongHBO() < addr2.toLongHBO());
}
// Derived operators
inline bool operator!=(const IPAddressV4& a, const IPAddressV4& b) {
return !(a == b);
}
inline bool operator>(const IPAddressV4& a, const IPAddressV4& b) {
return b < a;
}
inline bool operator<=(const IPAddressV4& a, const IPAddressV4& b) {
return !(a > b);
}
inline bool operator>=(const IPAddressV4& a, const IPAddressV4& b) {
return !(a < b);
}
} // folly
namespace std {
template<>
struct hash<folly::IPAddressV4> {
size_t operator()(const folly::IPAddressV4 addr) const {
return addr.hash();
}
};
} // std

391
ios/Pods/Folly/folly/IPAddressV6.h generated Normal file
View File

@ -0,0 +1,391 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstring>
#include <array>
#include <functional>
#include <iosfwd>
#include <map>
#include <stdexcept>
#include <folly/Hash.h>
#include <folly/Range.h>
#include <folly/detail/IPAddress.h>
namespace folly {
class IPAddress;
class IPAddressV4;
class IPAddressV6;
class MacAddress;
/**
* Pair of IPAddressV6, netmask
*/
typedef std::pair<IPAddressV6, uint8_t> CIDRNetworkV6;
/**
* Specialization for IPv6 addresses
*/
typedef std::array<uint8_t, 16> ByteArray16;
/**
* IPv6 variation of IPAddress.
*
* Added methods: createIPv4, getIPv4For6To4, is6To4,
* isTeredo, isIPv4Mapped, tryCreateIPv4, type
*
* @see IPAddress
*
* Notes on scope ID parsing:
*
* getaddrinfo() uses if_nametoindex() to convert interface names
* into a numerical index. For instance,
* "fe80::202:c9ff:fec1:ee08%eth0" may return scope ID 2 on some
* hosts, but other numbers on other hosts. It will fail entirely on
* hosts without an eth0 interface.
*
* Serializing / deserializing IPAddressV6 objects on different hosts
* that use link-local scoping probably won't work.
*/
class IPAddressV6 {
public:
// V6 Address Type
enum Type {
TEREDO, T6TO4, NORMAL,
};
// A constructor parameter to indicate that we should create a link-local
// IPAddressV6.
enum LinkLocalTag {
LINK_LOCAL,
};
// Thrown when a type assertion fails
typedef std::runtime_error TypeError;
// Binary prefix for teredo networks
static const uint32_t PREFIX_TEREDO;
// Binary prefix for 6to4 networks
static const uint32_t PREFIX_6TO4;
// Size of std::string returned by toFullyQualified.
static constexpr size_t kToFullyQualifiedSize =
8 /*words*/ * 4 /*hex chars per word*/ + 7 /*separators*/;
// returns true iff the input string can be parsed as an ipv6-address
static bool validate(StringPiece ip);
/**
* Create a new IPAddress instance from the provided binary data.
* @throws IPAddressFormatException if the input length is not 16 bytes.
*/
static IPAddressV6 fromBinary(ByteRange bytes) {
IPAddressV6 addr;
addr.setFromBinary(bytes);
return addr;
}
/**
* Returns the address as a Range.
*/
ByteRange toBinary() const {
return ByteRange((const unsigned char *) &addr_.in6Addr_.s6_addr, 16);
}
/**
* Default constructor for IPAddressV6.
*
* The address value will be ::0
*/
IPAddressV6();
// Create an IPAddressV6 from a string
// @throws IPAddressFormatException
//
explicit IPAddressV6(StringPiece ip);
// ByteArray16 constructor
explicit IPAddressV6(const ByteArray16& src);
// in6_addr constructor
explicit IPAddressV6(const in6_addr& src);
// sockaddr_in6 constructor
explicit IPAddressV6(const sockaddr_in6& src);
/**
* Create a link-local IPAddressV6 from the specified ethernet MAC address.
*/
IPAddressV6(LinkLocalTag tag, MacAddress mac);
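/**
 * Sketch (assumes MacAddress is constructible from its colon-separated
 * string form, as it is elsewhere in folly):
 * @code
 *   IPAddressV6 ll(IPAddressV6::LINK_LOCAL, MacAddress("02:00:00:00:00:01"));
 *   // ll.isLinkLocal() == true; the low 64 bits are derived from the MAC
 *   // in EUI-64 fashion under the fe80::/64 prefix.
 * @endcode
 */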
// return the mapped V4 address
// @throws IPAddressFormatException if !isIPv4Mapped
IPAddressV4 createIPv4() const;
/**
* Return a V4 address if this is a 6To4 address.
* @throws TypeError if not a 6To4 address
*/
IPAddressV4 getIPv4For6To4() const;
// Return true if a 6TO4 address
bool is6To4() const {
return type() == IPAddressV6::Type::T6TO4;
}
// Return true if a TEREDO address
bool isTeredo() const {
return type() == IPAddressV6::Type::TEREDO;
}
// return true if this is v4-to-v6-mapped
bool isIPv4Mapped() const;
// Return the V6 address type
Type type() const;
/**
* @see IPAddress#bitCount
* @returns 128
*/
static size_t bitCount() { return 128; }
/**
* @see IPAddress#toJson
*/
std::string toJson() const;
size_t hash() const;
// @see IPAddress#inSubnet
// @throws IPAddressFormatException if string doesn't contain a V6 address
bool inSubnet(StringPiece cidrNetwork) const;
// return true if address is in subnet
bool inSubnet(const IPAddressV6& subnet, uint8_t cidr) const {
return inSubnetWithMask(subnet, fetchMask(cidr));
}
bool inSubnetWithMask(const IPAddressV6& subnet,
const ByteArray16& mask) const;
// @see IPAddress#isLoopback
bool isLoopback() const;
// @see IPAddress#isNonroutable
bool isNonroutable() const {
return !isRoutable();
}
/**
* Return true if this address is routable.
*/
bool isRoutable() const;
// @see IPAddress#isPrivate
bool isPrivate() const;
/**
* Return true if this is a link-local IPv6 address.
*
* Note that this only returns true for addresses in the fe80::/10 range.
* It returns false for the loopback address (::1), even though this address
* also effectively has link-local scope. It also returns false for
* link-scope and interface-scope multicast addresses.
*/
bool isLinkLocal() const;
/**
* Return true if this is a multicast address.
*/
bool isMulticast() const;
/**
* Return the flags for a multicast address.
* This method may only be called on multicast addresses.
*/
uint8_t getMulticastFlags() const;
/**
* Return the scope for a multicast address.
* This method may only be called on multicast addresses.
*/
uint8_t getMulticastScope() const;
// @see IPAddress#isZero
bool isZero() const {
constexpr auto zero = ByteArray16{{}};
return 0 == std::memcmp(bytes(), zero.data(), zero.size());
}
bool isLinkLocalBroadcast() const;
// @see IPAddress#mask
IPAddressV6 mask(size_t numBits) const;
// return underlying in6_addr structure
in6_addr toAddr() const { return addr_.in6Addr_; }
uint16_t getScopeId() const { return scope_; }
void setScopeId(uint16_t scope) {
scope_ = scope;
}
sockaddr_in6 toSockAddr() const {
sockaddr_in6 addr;
memset(&addr, 0, sizeof(sockaddr_in6));
addr.sin6_family = AF_INET6;
addr.sin6_scope_id = scope_;
memcpy(&addr.sin6_addr, &addr_.in6Addr_, sizeof(in6_addr));
return addr;
}
ByteArray16 toByteArray() const {
ByteArray16 ba{{0}};
std::memcpy(ba.data(), bytes(), 16);
return ba;
}
// @see IPAddress#toFullyQualified
std::string toFullyQualified() const;
// @see IPAddress#str
std::string str() const;
// @see IPAddress#version
size_t version() const { return 6; }
/**
* Return the solicited-node multicast address for this address.
*/
IPAddressV6 getSolicitedNodeAddress() const;
/**
* Return the mask associated with the given number of bits.
* If for instance numBits was 24 (e.g. /24) then the returned 16-byte mask
* has its first three bytes set to 0xff and the remaining thirteen set to 0x00.
* @param [in] numBits bitmask to retrieve
* @throws abort if numBits == 0 or numBits > bitCount()
* @return mask associated with numBits
*/
static const ByteArray16 fetchMask(size_t numBits);
// Given 2 IPAddressV6,mask pairs extract the longest common IPAddress,
// mask pair
static CIDRNetworkV6 longestCommonPrefix(
const CIDRNetworkV6& one,
const CIDRNetworkV6& two);
// Number of bytes in the address representation.
static constexpr size_t byteCount() { return 16; }
//get nth most significant bit - 0 indexed
bool getNthMSBit(size_t bitIndex) const {
return detail::getNthMSBitImpl(*this, bitIndex, AF_INET6);
}
//get nth most significant byte - 0 indexed
uint8_t getNthMSByte(size_t byteIndex) const;
//get nth bit - 0 indexed
bool getNthLSBit(size_t bitIndex) const {
return getNthMSBit(bitCount() - bitIndex - 1);
}
//get nth byte - 0 indexed
uint8_t getNthLSByte(size_t byteIndex) const {
return getNthMSByte(byteCount() - byteIndex - 1);
}
const unsigned char* bytes() const { return addr_.in6Addr_.s6_addr; }
protected:
/**
* Helper that returns true if the address is in the binary subnet specified
* by addr.
*/
bool inBinarySubnet(const std::array<uint8_t, 2> addr,
size_t numBits) const;
private:
union AddressStorage {
in6_addr in6Addr_;
ByteArray16 bytes_;
AddressStorage() {
std::memset(this, 0, sizeof(AddressStorage));
}
explicit AddressStorage(const ByteArray16& bytes): bytes_(bytes) {}
explicit AddressStorage(const in6_addr& addr): in6Addr_(addr) {}
explicit AddressStorage(MacAddress mac);
} addr_;
// Link-local scope id. This should always be 0 for IPAddresses that
// are *not* link-local.
uint16_t scope_{0};
static const std::array<ByteArray16, 129> masks_;
/**
* Set the current IPAddressV6 object to have the address specified by bytes.
* @throws IPAddressFormatException if bytes.size() is not 16.
*/
void setFromBinary(ByteRange bytes);
};
// boost::hash uses hash_value() so this allows boost::hash to work
// automatically for IPAddressV6
std::size_t hash_value(const IPAddressV6& addr);
std::ostream& operator<<(std::ostream& os, const IPAddressV6& addr);
// Define toAppend() to allow IPAddressV6 to be used with to<string>
void toAppend(IPAddressV6 addr, std::string* result);
void toAppend(IPAddressV6 addr, fbstring* result);
/**
* Return true if two addresses are equal.
*/
inline bool operator==(const IPAddressV6& addr1, const IPAddressV6& addr2) {
return (std::memcmp(addr1.toAddr().s6_addr, addr2.toAddr().s6_addr, 16) == 0)
&& addr1.getScopeId() == addr2.getScopeId();
}
// Return true if addr1 < addr2
inline bool operator<(const IPAddressV6& addr1, const IPAddressV6& addr2) {
auto cmp = std::memcmp(addr1.toAddr().s6_addr,
addr2.toAddr().s6_addr, 16) < 0;
if (!cmp) {
return addr1.getScopeId() < addr2.getScopeId();
} else {
return cmp;
}
}
// Derived operators
inline bool operator!=(const IPAddressV6& a, const IPAddressV6& b) {
return !(a == b);
}
inline bool operator>(const IPAddressV6& a, const IPAddressV6& b) {
return b < a;
}
inline bool operator<=(const IPAddressV6& a, const IPAddressV6& b) {
return !(a > b);
}
inline bool operator>=(const IPAddressV6& a, const IPAddressV6& b) {
return !(a < b);
}
} // folly
namespace std {
template<>
struct hash<folly::IPAddressV6> {
size_t operator()(const folly::IPAddressV6& addr) const {
return addr.hash();
}
};
} // std

121
ios/Pods/Folly/folly/Indestructible.h generated Normal file
View File

@ -0,0 +1,121 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <utility>
#include <glog/logging.h>
#include <folly/Likely.h>
#include <folly/Portability.h>
namespace folly {
/***
* Indestructible
*
* When you need a Meyers singleton that will not get destructed, even at
* shutdown, and you also want the object stored inline.
*
* Use like:
*
* void doSomethingWithExpensiveData();
*
* void doSomethingWithExpensiveData() {
* static const Indestructible<map<string, int>> data{
* map<string, int>{{"key1", 17}, {"key2", 19}, {"key3", 23}},
* };
* callSomethingTakingAMapByRef(*data);
* }
*
* This should be used only for Meyers singletons, and, even then, only when
* the instance does not need to be destructed ever.
*
* This should not be used more generally, e.g., as member fields, etc.
*
* This is designed as an alternative, but with one fewer allocation at
* construction time and one fewer pointer dereference at access time, to the
* Meyers singleton pattern of:
*
* void doSomethingWithExpensiveData() {
* static const auto data = // never `delete`d
* new map<string, int>{{"key1", 17}, {"key2", 19}, {"key3", 23}};
* callSomethingTakingAMapByRef(*data);
* }
*/
template <typename T>
class Indestructible final {
public:
template <typename... Args>
explicit constexpr Indestructible(Args&&... args) noexcept(
std::is_nothrow_constructible<T, Args&&...>::value)
: storage_(std::forward<Args>(args)...), inited_(true) {}
~Indestructible() = default;
Indestructible(Indestructible const&) = delete;
Indestructible& operator=(Indestructible const&) = delete;
Indestructible(Indestructible&& other) noexcept(
std::is_nothrow_move_constructible<T>::value)
: storage_(std::move(other.storage_.value)) {
other.inited_ = false;
}
Indestructible& operator=(Indestructible&& other) noexcept(
std::is_nothrow_move_assignable<T>::value) {
storage_.value = std::move(other.storage_.value);
other.inited_ = false;
return *this;
}
T* get() {
check();
return &storage_.value;
}
T const* get() const {
check();
return &storage_.value;
}
T& operator*() { return *get(); }
T const& operator*() const { return *get(); }
T* operator->() { return get(); }
T const* operator->() const { return get(); }
private:
void check() const {
if (UNLIKELY(!inited_)) {
fail();
}
}
[[noreturn]] FOLLY_NOINLINE static void fail() {
LOG(FATAL) << "Indestructible is not initialized";
}
union Storage {
T value;
template <typename... Args>
explicit constexpr Storage(Args&&... args)
: value(std::forward<Args>(args)...) {}
~Storage() {}
};
Storage storage_;
bool inited_{false};
};
}

467
ios/Pods/Folly/folly/IndexedMemPool.h generated Normal file
View File

@ -0,0 +1,467 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <type_traits>
#include <stdint.h>
#include <assert.h>
#include <boost/noncopyable.hpp>
#include <folly/AtomicStruct.h>
#include <folly/detail/CacheLocality.h>
#include <folly/portability/SysMman.h>
#include <folly/portability/Unistd.h>
// Ignore shadowing warnings within this file, so includers can use -Wshadow.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
namespace folly {
namespace detail {
template <typename Pool>
struct IndexedMemPoolRecycler;
}
/// Instances of IndexedMemPool dynamically allocate and then pool their
/// element type (T), returning 4-byte integer indices that can be passed
/// to the pool's operator[] method to access or obtain pointers to the
/// actual elements. The memory backing items returned from the pool
/// will always be readable, even if items have been returned to the pool.
/// These two features are useful for lock-free algorithms. The indexing
/// behavior makes it easy to build tagged pointer-like-things, since
/// a large number of elements can be managed using fewer bits than a
/// full pointer. The access-after-free behavior makes it safe to read
/// from T-s even after they have been recycled, since it is guaranteed
/// that the memory won't have been returned to the OS and unmapped
/// (the algorithm must still use a mechanism to validate that the read
/// was correct, but it doesn't have to worry about page faults), and if
/// the elements use internal sequence numbers it can be guaranteed that
/// there won't be an ABA match due to the element being overwritten with
/// a different type that has the same bit pattern.
///
/// IndexedMemPool has two object lifecycle strategies. The first
/// is to construct objects when they are allocated from the pool and
/// destroy them when they are recycled. In this mode allocIndex and
/// allocElem have emplace-like semantics. In the second mode, objects
/// are default-constructed the first time they are removed from the pool,
/// and deleted when the pool itself is deleted. By default the first
/// mode is used for non-trivial T, and the second is used for trivial T.
///
/// IMPORTANT: Space for extra elements is allocated to account for those
/// that are inaccessible because they are in other local lists, so the
/// actual number of items that can be allocated ranges from capacity to
/// capacity + (NumLocalLists_-1)*LocalListLimit_. This is important if
/// you are trying to maximize the capacity of the pool while constraining
/// the bit size of the resulting pointers, because the pointers will
/// actually range up to the boosted capacity. See maxIndexForCapacity
/// and capacityForMaxIndex.
///
/// To avoid contention, NumLocalLists_ free lists of limited (less than
/// or equal to LocalListLimit_) size are maintained, and each thread
/// retrieves and returns entries from its associated local list. If the
/// local list becomes too large then elements are placed in bulk in a
/// global free list. This allows items to be efficiently recirculated
/// from consumers to producers. AccessSpreader is used to access the
/// local lists, so there is no performance advantage to having more
/// local lists than L1 caches.
///
/// The pool mmap-s the entire necessary address space when the pool is
/// constructed, but delays element construction. This means that only
/// elements that are actually returned to the caller get paged into the
/// process's resident set (RSS).
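/// A minimal usage sketch (assumes a trivially-constructible element type,
/// so the second lifecycle strategy described above applies):
///
///   IndexedMemPool<uint64_t> pool(1024);
///   uint32_t idx = pool.allocIndex();   // 0 means the pool is exhausted
///   if (idx != 0) {
///     pool[idx] = 42;                    // access through operator[]
///     assert(pool.locateElem(&pool[idx]) == idx);
///     pool.recycleIndex(idx);            // hand the slot back
///   }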
template <typename T,
int NumLocalLists_ = 32,
int LocalListLimit_ = 200,
template<typename> class Atom = std::atomic,
bool EagerRecycleWhenTrivial = false,
bool EagerRecycleWhenNotTrivial = true>
struct IndexedMemPool : boost::noncopyable {
typedef T value_type;
typedef std::unique_ptr<T, detail::IndexedMemPoolRecycler<IndexedMemPool>>
UniquePtr;
static_assert(LocalListLimit_ <= 255, "LocalListLimit must fit in 8 bits");
enum {
NumLocalLists = NumLocalLists_,
LocalListLimit = LocalListLimit_
};
static constexpr bool eagerRecycle() {
return std::is_trivial<T>::value
? EagerRecycleWhenTrivial : EagerRecycleWhenNotTrivial;
}
// these are public because clients may need to reason about the number
// of bits required to hold indices from a pool, given its capacity
static constexpr uint32_t maxIndexForCapacity(uint32_t capacity) {
// index of uint32_t(-1) == UINT32_MAX is reserved for isAllocated tracking
return std::min(uint64_t(capacity) + (NumLocalLists - 1) * LocalListLimit,
uint64_t(uint32_t(-1) - 1));
}
static constexpr uint32_t capacityForMaxIndex(uint32_t maxIndex) {
return maxIndex - (NumLocalLists - 1) * LocalListLimit;
}
/// Constructs a pool that can allocate at least _capacity_ elements,
/// even if all the local lists are full
explicit IndexedMemPool(uint32_t capacity)
: actualCapacity_(maxIndexForCapacity(capacity))
, size_(0)
, globalHead_(TaggedPtr{})
{
const size_t needed = sizeof(Slot) * (actualCapacity_ + 1);
size_t pagesize = sysconf(_SC_PAGESIZE);
mmapLength_ = ((needed - 1) & ~(pagesize - 1)) + pagesize;
assert(needed <= mmapLength_ && mmapLength_ < needed + pagesize);
assert((mmapLength_ % pagesize) == 0);
slots_ = static_cast<Slot*>(mmap(nullptr, mmapLength_,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
if (slots_ == MAP_FAILED) {
assert(errno == ENOMEM);
throw std::bad_alloc();
}
}
/// Destroys all of the contained elements
~IndexedMemPool() {
if (!eagerRecycle()) {
for (size_t i = size_; i > 0; --i) {
slots_[i].~Slot();
}
}
munmap(slots_, mmapLength_);
}
/// Returns a lower bound on the number of elements that may be
/// simultaneously allocated and not yet recycled. Because of the
/// local lists it is possible that more elements than this are returned
/// successfully
size_t capacity() {
return capacityForMaxIndex(actualCapacity_);
}
/// Finds a slot with a non-zero index, emplaces a T there if we're
/// using the eager recycle lifecycle mode, and returns the index,
/// or returns 0 if no elements are available.
template <typename ...Args>
uint32_t allocIndex(Args&&... args) {
static_assert(sizeof...(Args) == 0 || eagerRecycle(),
"emplace-style allocation requires eager recycle, "
"which is defaulted only for non-trivial types");
auto idx = localPop(localHead());
if (idx != 0 && eagerRecycle()) {
T* ptr = &slot(idx).elem;
new (ptr) T(std::forward<Args>(args)...);
}
return idx;
}
/// If an element is available, returns a std::unique_ptr to it that will
/// recycle the element to the pool when it is reclaimed, otherwise returns
/// a null (falsy) std::unique_ptr
template <typename ...Args>
UniquePtr allocElem(Args&&... args) {
auto idx = allocIndex(std::forward<Args>(args)...);
T* ptr = idx == 0 ? nullptr : &slot(idx).elem;
return UniquePtr(ptr, typename UniquePtr::deleter_type(this));
}
/// Gives up ownership previously granted by alloc()
void recycleIndex(uint32_t idx) {
assert(isAllocated(idx));
if (eagerRecycle()) {
slot(idx).elem.~T();
}
localPush(localHead(), idx);
}
/// Provides access to the pooled element referenced by idx
T& operator[](uint32_t idx) {
return slot(idx).elem;
}
/// Provides access to the pooled element referenced by idx
const T& operator[](uint32_t idx) const {
return slot(idx).elem;
}
/// If elem == &pool[idx], then pool.locateElem(elem) == idx. Also,
/// pool.locateElem(nullptr) == 0
uint32_t locateElem(const T* elem) const {
if (!elem) {
return 0;
}
static_assert(std::is_standard_layout<Slot>::value, "offsetof needs POD");
auto slot = reinterpret_cast<const Slot*>(
reinterpret_cast<const char*>(elem) - offsetof(Slot, elem));
auto rv = slot - slots_;
// this assert also tests that rv is in range
assert(elem == &(*this)[rv]);
return rv;
}
/// Returns true iff idx has been alloc()ed and not recycleIndex()ed
bool isAllocated(uint32_t idx) const {
return slot(idx).localNext == uint32_t(-1);
}
private:
///////////// types
struct Slot {
T elem;
uint32_t localNext;
uint32_t globalNext;
Slot() : localNext{}, globalNext{} {}
};
struct TaggedPtr {
uint32_t idx;
// size is bottom 8 bits, tag in top 24. g++'s code generation for
// bitfields seems to depend on the phase of the moon, plus we can
// do better because we can rely on other checks to avoid masking
uint32_t tagAndSize;
enum : uint32_t {
SizeBits = 8,
SizeMask = (1U << SizeBits) - 1,
TagIncr = 1U << SizeBits,
};
uint32_t size() const {
return tagAndSize & SizeMask;
}
TaggedPtr withSize(uint32_t repl) const {
assert(repl <= LocalListLimit);
return TaggedPtr{ idx, (tagAndSize & ~SizeMask) | repl };
}
TaggedPtr withSizeIncr() const {
assert(size() < LocalListLimit);
return TaggedPtr{ idx, tagAndSize + 1 };
}
TaggedPtr withSizeDecr() const {
assert(size() > 0);
return TaggedPtr{ idx, tagAndSize - 1 };
}
TaggedPtr withIdx(uint32_t repl) const {
return TaggedPtr{ repl, tagAndSize + TagIncr };
}
TaggedPtr withEmpty() const {
return withIdx(0).withSize(0);
}
};
struct FOLLY_ALIGN_TO_AVOID_FALSE_SHARING LocalList {
AtomicStruct<TaggedPtr,Atom> head;
LocalList() : head(TaggedPtr{}) {}
};
////////// fields
/// the actual number of slots that we will allocate, to guarantee
/// that we will satisfy the capacity requested at construction time.
/// They will be numbered 1..actualCapacity_ (note the 1-based counting),
/// and occupy slots_[1..actualCapacity_].
size_t actualCapacity_;
/// the number of bytes allocated from mmap, which is a multiple of
/// the page size of the machine
size_t mmapLength_;
/// this records the number of slots that have actually been constructed.
/// To allow use of atomic ++ instead of CAS, we let this overflow.
/// The actual number of constructed elements is min(actualCapacity_,
/// size_)
Atom<uint32_t> size_;
/// raw storage, only 1..min(size_,actualCapacity_) (inclusive) are
/// actually constructed. Note that slots_[0] is not constructed or used
FOLLY_ALIGN_TO_AVOID_FALSE_SHARING Slot* slots_;
/// use AccessSpreader to find your list. We use stripes instead of
/// thread-local to avoid the need to grow or shrink on thread start
/// or join. These are heads of lists chained with localNext
LocalList local_[NumLocalLists];
/// this is the head of a list of nodes chained by globalNext, that are
/// themselves each the head of a list chained by localNext
FOLLY_ALIGN_TO_AVOID_FALSE_SHARING AtomicStruct<TaggedPtr,Atom> globalHead_;
///////////// private methods
size_t slotIndex(uint32_t idx) const {
assert(0 < idx &&
idx <= actualCapacity_ &&
idx <= size_.load(std::memory_order_acquire));
return idx;
}
Slot& slot(uint32_t idx) {
return slots_[slotIndex(idx)];
}
const Slot& slot(uint32_t idx) const {
return slots_[slotIndex(idx)];
}
// localHead references a full list chained by localNext. s should
// reference slot(localHead), it is passed as a micro-optimization
void globalPush(Slot& s, uint32_t localHead) {
while (true) {
TaggedPtr gh = globalHead_.load(std::memory_order_acquire);
s.globalNext = gh.idx;
if (globalHead_.compare_exchange_strong(gh, gh.withIdx(localHead))) {
// success
return;
}
}
}
// idx references a single node
void localPush(AtomicStruct<TaggedPtr,Atom>& head, uint32_t idx) {
Slot& s = slot(idx);
TaggedPtr h = head.load(std::memory_order_acquire);
while (true) {
s.localNext = h.idx;
if (h.size() == LocalListLimit) {
// push will overflow local list, steal it instead
if (head.compare_exchange_strong(h, h.withEmpty())) {
// steal was successful, put everything in the global list
globalPush(s, idx);
return;
}
} else {
// local list has space
if (head.compare_exchange_strong(h, h.withIdx(idx).withSizeIncr())) {
// success
return;
}
}
// h was updated by failing CAS
}
}
// returns 0 if empty
uint32_t globalPop() {
while (true) {
TaggedPtr gh = globalHead_.load(std::memory_order_acquire);
if (gh.idx == 0 || globalHead_.compare_exchange_strong(
gh, gh.withIdx(slot(gh.idx).globalNext))) {
// global list is empty, or pop was successful
return gh.idx;
}
}
}
// returns 0 if allocation failed
uint32_t localPop(AtomicStruct<TaggedPtr,Atom>& head) {
while (true) {
TaggedPtr h = head.load(std::memory_order_acquire);
if (h.idx != 0) {
// local list is non-empty, try to pop
Slot& s = slot(h.idx);
if (head.compare_exchange_strong(
h, h.withIdx(s.localNext).withSizeDecr())) {
// success
s.localNext = uint32_t(-1);
return h.idx;
}
continue;
}
uint32_t idx = globalPop();
if (idx == 0) {
// global list is empty, allocate and construct new slot
if (size_.load(std::memory_order_relaxed) >= actualCapacity_ ||
(idx = ++size_) > actualCapacity_) {
// allocation failed
return 0;
}
// default-construct it now if we aren't going to construct and
// destroy on each allocation
if (!eagerRecycle()) {
T* ptr = &slot(idx).elem;
new (ptr) T();
}
slot(idx).localNext = uint32_t(-1);
return idx;
}
Slot& s = slot(idx);
if (head.compare_exchange_strong(
h, h.withIdx(s.localNext).withSize(LocalListLimit))) {
// global list moved to local list, keep head for us
s.localNext = uint32_t(-1);
return idx;
}
// local bulk push failed, return idx to the global list and try again
globalPush(s, idx);
}
}
AtomicStruct<TaggedPtr,Atom>& localHead() {
auto stripe = detail::AccessSpreader<Atom>::current(NumLocalLists);
return local_[stripe].head;
}
};
namespace detail {
/// This is a stateful Deleter functor, which allows std::unique_ptr
/// to track elements allocated from an IndexedMemPool by tracking the
/// associated pool. See IndexedMemPool::allocElem.
template <typename Pool>
struct IndexedMemPoolRecycler {
Pool* pool;
explicit IndexedMemPoolRecycler(Pool* pool) : pool(pool) {}
IndexedMemPoolRecycler(const IndexedMemPoolRecycler<Pool>& rhs)
= default;
IndexedMemPoolRecycler& operator= (const IndexedMemPoolRecycler<Pool>& rhs)
= default;
void operator()(typename Pool::value_type* elem) const {
pool->recycleIndex(pool->locateElem(elem));
}
};
}
} // namespace folly
# pragma GCC diagnostic pop

118
ios/Pods/Folly/folly/IntrusiveList.h generated Normal file
View File

@ -0,0 +1,118 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
/*
* This file contains convenience aliases that make boost::intrusive::list
* easier to use.
*/
#include <boost/intrusive/list.hpp>
namespace folly {
/**
* An auto-unlink intrusive list hook.
*/
using IntrusiveListHook = boost::intrusive::list_member_hook<
boost::intrusive::link_mode<boost::intrusive::auto_unlink>>;
/**
* An intrusive list.
*
* An IntrusiveList always uses an auto-unlink hook.
* Beware that IntrusiveList::size() is an O(n) operation, since it has to walk
* the entire list.
*
* Example usage:
*
* class Foo {
* // Note that the listHook member variable needs to be visible
* // to the code that defines the IntrusiveList instantiation.
* // The list hook can be made public, or you can make the other class a
* // friend.
* IntrusiveListHook listHook;
* };
*
* using FooList = IntrusiveList<Foo, &Foo::listHook>;
*
* Foo *foo = new Foo();
* FooList myList;
* myList.push_back(*foo);
*
* Note that each IntrusiveListHook can only be part of a single list at any
* given time. If you need the same object to be stored in two lists at once,
* you need to use two different IntrusiveListHook member variables.
*
* The elements stored in the list must contain an IntrusiveListHook member
* variable.
*/
template<typename T, IntrusiveListHook T::* PtrToMember>
using IntrusiveList = boost::intrusive::list<
T,
boost::intrusive::member_hook<T, IntrusiveListHook, PtrToMember>,
boost::intrusive::constant_time_size<false>>;
/**
* A safe-link intrusive list hook.
*/
using SafeIntrusiveListHook = boost::intrusive::list_member_hook<
boost::intrusive::link_mode<boost::intrusive::safe_link>>;
/**
* An intrusive list with const-time size() method.
*
* A CountedIntrusiveList always uses a safe-link hook.
* CountedIntrusiveList::size() is an O(1) operation. Users of this type
* of lists need to remove a member from a list by calling one of the
* methods on the list (e.g., erase(), pop_front(), etc.), rather than
* calling unlink on the member's list hook. Given references to a
* list and a member, a constant-time removal operation can be
* accomplished by list.erase(list.iterator_to(member)). Also, when a
* member is destroyed, it is NOT automatically removed from the list.
*
* Example usage:
*
* class Foo {
* // Note that the listHook member variable needs to be visible
* // to the code that defines the CountedIntrusiveList instantiation.
* // The list hook can be made public, or you can make the other class a
* // friend.
* SafeIntrusiveListHook listHook;
* };
*
* using FooList = CountedIntrusiveList<Foo, &Foo::listHook>;
*
* Foo *foo = new Foo();
* FooList myList;
* myList.push_back(*foo);
* myList.pop_front();
*
* Note that each SafeIntrusiveListHook can only be part of a single list at any
* given time. If you need the same object to be stored in two lists at once,
* you need to use two different SafeIntrusiveListHook member variables.
*
* The elements stored in the list must contain a SafeIntrusiveListHook member
* variable.
*/
template<typename T, SafeIntrusiveListHook T::* PtrToMember>
using CountedIntrusiveList = boost::intrusive::list<
T,
boost::intrusive::member_hook<T, SafeIntrusiveListHook, PtrToMember>,
boost::intrusive::constant_time_size<true>>;
} // folly

133
ios/Pods/Folly/folly/Lazy.h generated Normal file
View File

@ -0,0 +1,133 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <utility>
#include <type_traits>
#include <folly/Optional.h>
namespace folly {
//////////////////////////////////////////////////////////////////////
/*
* Lazy -- for delayed initialization of a value. The value's
* initialization will be computed on demand at its first use, but
* will not be recomputed if its value is requested again. The value
* may still be mutated after its initialization if the lazy is not
* declared const.
*
* The value is created using folly::lazy, usually with a lambda, and
* its value is requested using operator().
*
* Note that the value is not safe for concurrent accesses by multiple
* threads, even if you declare it const. See note below.
*
*
* Example Usage:
*
* void foo() {
* auto const val = folly::lazy([&]{
* return something_expensive(blah());
* });
*
* if (condition1) {
* use(val());
* }
* if (condition2) {
* useMaybeAgain(val());
* } else {
* // Unneeded in this branch.
* }
* }
*
*
* Rationale:
*
* - operator() is used to request the value instead of an implicit
* conversion because the slight syntactic overhead in common use
* seems worth the increased clarity.
*
* - Lazy values do not model CopyConstructible because it is
* unclear what semantics would be desirable. Either copies
* should share the cached value (adding overhead to cases that
* don't need to support copies), or they could recompute the
* value unnecessarily. Sharing with mutable lazies would also
* leave them with non-value semantics despite looking
* value-like.
*
* - Not thread safe for const accesses. Many use cases for lazy
* values are local variables on the stack, where multiple
* threads shouldn't even be able to reach the value. It still
* is useful to indicate/check that the value doesn't change with
* const, particularly when it is captured by a large family of
* lambdas. Adding internal synchronization seems like it would
* pessimize the most common use case in favor of less likely use
* cases.
*
*/
//////////////////////////////////////////////////////////////////////
namespace detail {
template<class Func>
struct Lazy {
typedef typename std::result_of<Func()>::type result_type;
explicit Lazy(Func&& f) : func_(std::move(f)) {}
explicit Lazy(Func& f) : func_(f) {}
Lazy(Lazy&& o)
: value_(std::move(o.value_))
, func_(std::move(o.func_))
{}
Lazy(const Lazy&) = delete;
Lazy& operator=(const Lazy&) = delete;
Lazy& operator=(Lazy&&) = delete;
const result_type& operator()() const {
return const_cast<Lazy&>(*this)();
}
result_type& operator()() {
if (!value_) value_ = func_();
return *value_;
}
private:
Optional<result_type> value_;
Func func_;
};
}
//////////////////////////////////////////////////////////////////////
template<class Func>
detail::Lazy<typename std::remove_reference<Func>::type>
lazy(Func&& fun) {
return detail::Lazy<typename std::remove_reference<Func>::type>(
std::forward<Func>(fun)
);
}
//////////////////////////////////////////////////////////////////////
}

606
ios/Pods/Folly/folly/LifoSem.h generated Normal file
View File

@ -0,0 +1,606 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <string.h>
#include <stdint.h>
#include <atomic>
#include <algorithm>
#include <memory>
#include <system_error>
#include <folly/AtomicStruct.h>
#include <folly/Baton.h>
#include <folly/IndexedMemPool.h>
#include <folly/Likely.h>
#include <folly/detail/CacheLocality.h>
namespace folly {
template <template<typename> class Atom = std::atomic,
class BatonType = Baton<Atom>>
struct LifoSemImpl;
/// LifoSem is a semaphore that wakes its waiters in a manner intended to
/// maximize performance rather than fairness. It should be preferred
/// to a mutex+condvar or POSIX sem_t solution when all of the waiters
/// are equivalent. It is faster than a condvar or sem_t, and it has a
/// shutdown state that might save you a lot of complexity when it comes
/// time to shut down your work pipelines. LifoSem is larger than sem_t,
/// but that is only because it uses padding and alignment to avoid
/// false sharing.
///
/// LifoSem allows multi-post and multi-tryWait, and provides a shutdown
/// state that awakens all waiters. LifoSem is faster than sem_t because
/// it performs exact wakeups, so it often requires fewer system calls.
/// It provides all of the functionality of sem_t except for timed waiting.
/// It is called LifoSem because its wakeup policy is approximately LIFO,
/// rather than the usual FIFO.
///
/// The core semaphore operations provided are:
///
/// -- post() -- if there is a pending waiter, wake it up, otherwise
/// increment the value of the semaphore. If the value of the semaphore
/// is already 2^32-1, does nothing. Compare to sem_post().
///
/// -- post(n) -- equivalent to n calls to post(), but much more efficient.
/// sem_t has no equivalent to this method.
///
/// -- bool tryWait() -- if the semaphore's value is positive, decrements it
/// and returns true, otherwise returns false. Compare to sem_trywait().
///
/// -- uint32_t tryWait(uint32_t n) -- attempts to decrement the semaphore's
/// value by n, returning the amount by which it actually was decremented
/// (a value from 0 to n inclusive). Not atomic. Equivalent to n calls
/// to tryWait(). sem_t has no equivalent to this method.
///
/// -- wait() -- waits until tryWait() can succeed. Compare to sem_wait().
///
/// LifoSem also has the notion of a shutdown state, in which any calls
/// that would block (or are already blocked) throw ShutdownSemError.
/// Note the difference between a call to wait() and a call to wait()
/// that might block. In the former case tryWait() would succeed, and no
/// isShutdown() check is performed. In the latter case an exception is
/// thrown. This behavior allows a LifoSem controlling work distribution
/// to drain. If you want to immediately stop all waiting on shutdown,
/// you can just check isShutdown() yourself (preferably wrapped in
/// an UNLIKELY). This fast-stop behavior is easy to add, but difficult
/// to remove if you want the draining behavior, which is why we have
/// chosen the former. Since wait() is the only method that can block,
/// it is the only one that is affected by the shutdown state.
///
/// All LifoSem operations except valueGuess() are guaranteed
/// to be linearizable.
typedef LifoSemImpl<> LifoSem;
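/*
 * A minimal producer/consumer sketch (not part of the original header); the
 * queue, the item handling, and the shutdown sequencing are illustrative only.
 *
 *   folly::LifoSem sem;
 *
 *   // producer thread: publish an item, then post one unit
 *   workQueue.enqueue(std::move(item));
 *   sem.post();
 *
 *   // consumer thread: block for a unit, then consume an item
 *   try {
 *     sem.wait();
 *     process(workQueue.dequeue());
 *   } catch (const folly::ShutdownSemError&) {
 *     // sem.shutdown() was called and this wait() would have blocked
 *   }
 */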
/// The exception thrown when wait()ing on an isShutdown() LifoSem
struct ShutdownSemError : public std::runtime_error {
explicit ShutdownSemError(const std::string& msg);
virtual ~ShutdownSemError() noexcept;
};
namespace detail {
// Internally, a LifoSem is either a value or a linked list of wait nodes.
// This union is captured in the LifoSemHead type, which holds either a
// value or an indexed pointer to the list. LifoSemHead itself is a value
// type; the head is a mutable atomic box containing a LifoSemHead value.
// Each wait node corresponds to exactly one waiter. Values can flow
// through the semaphore either by going into and out of the head's value,
// or by direct communication from a poster to a waiter. The former path
// is taken when there are no pending waiters, the latter otherwise. The
// general flow of a post is to try to increment the value or pop-and-post
// a wait node. Either of those has the effect of conveying one semaphore
// unit. Waiting is the opposite, either a decrement of the value or
// push-and-wait of a wait node. The generic LifoSemBase abstracts the
// actual mechanism by which a wait node's post->wait communication is
// performed, which is why we have LifoSemRawNode and LifoSemNode.
/// LifoSemRawNode is the actual pooled storage that backs LifoSemNode
/// for user-specified Handoff types. This is done so that we can have
/// a large static IndexedMemPool of nodes, instead of per-type pools
template <template<typename> class Atom>
struct LifoSemRawNode {
std::aligned_storage<sizeof(void*),alignof(void*)>::type raw;
/// The IndexedMemPool index of the next node in this chain, or 0
/// if none. This will be set to uint32_t(-1) if the node is being
/// posted due to a shutdown-induced wakeup
uint32_t next;
bool isShutdownNotice() const { return next == uint32_t(-1); }
void clearShutdownNotice() { next = 0; }
void setShutdownNotice() { next = uint32_t(-1); }
typedef folly::IndexedMemPool<LifoSemRawNode<Atom>,32,200,Atom> Pool;
/// Storage for all of the waiter nodes for LifoSem-s that use Atom
static Pool& pool();
};
/// Use this macro to declare the static storage that backs the raw nodes
/// for the specified atomic type
#define LIFOSEM_DECLARE_POOL(Atom, capacity) \
namespace folly { \
namespace detail { \
template <> \
LifoSemRawNode<Atom>::Pool& LifoSemRawNode<Atom>::pool() { \
static Pool* instance = new Pool((capacity)); \
return *instance; \
} \
} \
}
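/*
 * A hypothetical instantiation (not part of the original header): exactly one
 * translation unit in the program defines the pool that backs the
 * std::atomic-based nodes, with a capacity chosen by the application.
 *
 *   // in a single .cpp file, at namespace scope
 *   LIFOSEM_DECLARE_POOL(std::atomic, 131072)
 */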
/// Handoff is a type not bigger than a void* that knows how to perform a
/// single post() -> wait() communication. It must have a post() method.
/// If it has a wait() method then LifoSemBase's wait() implementation
/// will work out of the box, otherwise you will need to specialize
/// LifoSemBase::wait accordingly.
template <typename Handoff, template<typename> class Atom>
struct LifoSemNode : public LifoSemRawNode<Atom> {
static_assert(sizeof(Handoff) <= sizeof(LifoSemRawNode<Atom>::raw),
"Handoff too big for small-object optimization, use indirection");
static_assert(alignof(Handoff) <=
alignof(decltype(LifoSemRawNode<Atom>::raw)),
"Handoff alignment constraint not satisfied");
template <typename ...Args>
void init(Args&&... args) {
new (&this->raw) Handoff(std::forward<Args>(args)...);
}
void destroy() {
handoff().~Handoff();
#ifndef NDEBUG
memset(&this->raw, 'F', sizeof(this->raw));
#endif
}
Handoff& handoff() {
return *static_cast<Handoff*>(static_cast<void*>(&this->raw));
}
const Handoff& handoff() const {
return *static_cast<const Handoff*>(static_cast<const void*>(&this->raw));
}
};
template <typename Handoff, template<typename> class Atom>
struct LifoSemNodeRecycler {
void operator()(LifoSemNode<Handoff,Atom>* elem) const {
elem->destroy();
auto idx = LifoSemRawNode<Atom>::pool().locateElem(elem);
LifoSemRawNode<Atom>::pool().recycleIndex(idx);
}
};
/// LifoSemHead is a 64-bit struct that holds a 32-bit value, some state
/// bits, and a sequence number used to avoid ABA problems in the lock-free
/// management of the LifoSem's wait lists. The value can either hold
/// an integral semaphore value (if there are no waiters) or a node index
/// (see IndexedMemPool) for the head of a list of wait nodes
class LifoSemHead {
// What we really want are bitfields:
// uint64_t data : 32; uint64_t isNodeIdx : 1; uint64_t seq : 31;
// Unfortunately g++ generates pretty bad code for this sometimes (I saw
// -O3 code from gcc 4.7.1 copying the bitfields one at a time instead of
// in bulk, for example). We can generate better code anyway by assuming
// that setters won't be given values that cause under/overflow, and
// putting the sequence at the end where its planned overflow doesn't
// need any masking.
//
// data == 0 (empty list) with isNodeIdx is conceptually the same
// as data == 0 (no unclaimed increments) with !isNodeIdx; we always
// convert the former into the latter to make the logic simpler.
enum {
IsNodeIdxShift = 32,
IsShutdownShift = 33,
SeqShift = 34,
};
enum : uint64_t {
IsNodeIdxMask = uint64_t(1) << IsNodeIdxShift,
IsShutdownMask = uint64_t(1) << IsShutdownShift,
SeqIncr = uint64_t(1) << SeqShift,
SeqMask = ~(SeqIncr - 1),
};
public:
uint64_t bits;
//////// getters
inline uint32_t idx() const {
assert(isNodeIdx());
assert(uint32_t(bits) != 0);
return uint32_t(bits);
}
inline uint32_t value() const {
assert(!isNodeIdx());
return uint32_t(bits);
}
inline constexpr bool isNodeIdx() const {
return (bits & IsNodeIdxMask) != 0;
}
inline constexpr bool isShutdown() const {
return (bits & IsShutdownMask) != 0;
}
inline constexpr uint32_t seq() const {
return uint32_t(bits >> SeqShift);
}
//////// setter-like things return a new struct
/// This should only be used for initial construction, not for setting
/// the value, because it clears the sequence number
static inline constexpr LifoSemHead fresh(uint32_t value) {
return LifoSemHead{ value };
}
/// Returns the LifoSemHead that results from popping a waiter node,
/// given the current waiter node's next ptr
inline LifoSemHead withPop(uint32_t idxNext) const {
assert(isNodeIdx());
if (idxNext == 0) {
// no isNodeIdx bit or data bits. Wraparound of seq bits is okay
return LifoSemHead{ (bits & (SeqMask | IsShutdownMask)) + SeqIncr };
} else {
// preserve sequence bits (incremented with wraparound okay) and
// isNodeIdx bit, replace all data bits
return LifoSemHead{
(bits & (SeqMask | IsShutdownMask | IsNodeIdxMask)) +
SeqIncr + idxNext };
}
}
/// Returns the LifoSemHead that results from pushing a new waiter node
inline LifoSemHead withPush(uint32_t _idx) const {
assert(isNodeIdx() || value() == 0);
assert(!isShutdown());
assert(_idx != 0);
return LifoSemHead{ (bits & SeqMask) | IsNodeIdxMask | _idx };
}
/// Returns the LifoSemHead with value increased by delta, with
/// saturation if the maximum value is reached
inline LifoSemHead withValueIncr(uint32_t delta) const {
assert(!isNodeIdx());
auto rv = LifoSemHead{ bits + SeqIncr + delta };
if (UNLIKELY(rv.isNodeIdx())) {
// value has overflowed into the isNodeIdx bit
rv = LifoSemHead{ (rv.bits & ~IsNodeIdxMask) | (IsNodeIdxMask - 1) };
}
return rv;
}
/// Returns the LifoSemHead that results from decrementing the value
inline LifoSemHead withValueDecr(uint32_t delta) const {
assert(delta > 0 && delta <= value());
return LifoSemHead{ bits + SeqIncr - delta };
}
/// Returns the LifoSemHead with the same state as the current node,
/// but with the shutdown bit set
inline LifoSemHead withShutdown() const {
return LifoSemHead{ bits | IsShutdownMask };
}
inline constexpr bool operator== (const LifoSemHead& rhs) const {
return bits == rhs.bits;
}
inline constexpr bool operator!= (const LifoSemHead& rhs) const {
return !(*this == rhs);
}
};
/// LifoSemBase is the engine for several different types of LIFO
/// semaphore. LifoSemBase handles storage of positive semaphore values
/// and wait nodes, but the actual waiting and notification mechanism is
/// up to the client.
///
/// The Handoff type is responsible for arranging one wakeup notification.
/// See LifoSemNode for more information on how to make your own.
template <typename Handoff,
template<typename> class Atom = std::atomic>
struct LifoSemBase {
/// Constructor
constexpr explicit LifoSemBase(uint32_t initialValue = 0)
: head_(LifoSemHead::fresh(initialValue)), padding_() {}
LifoSemBase(LifoSemBase const&) = delete;
LifoSemBase& operator=(LifoSemBase const&) = delete;
/// Silently saturates if value is already 2^32-1
void post() {
auto idx = incrOrPop(1);
if (idx != 0) {
idxToNode(idx).handoff().post();
}
}
/// Equivalent to n calls to post(), except may be much more efficient.
/// At any point in time at which the semaphore's value would exceed
/// 2^32-1 if tracked with infinite precision, it may be silently
/// truncated to 2^32-1. This saturation is not guaranteed to be exact,
/// although it is guaranteed that overflow won't result in wrap-around.
/// There would be a substantial performance and complexity cost in
/// guaranteeing exact saturation (similar to the cost of maintaining
/// linearizability near the zero value, but without as much of
/// a benefit).
void post(uint32_t n) {
uint32_t idx;
while (n > 0 && (idx = incrOrPop(n)) != 0) {
// pop accounts for only 1
idxToNode(idx).handoff().post();
--n;
}
}
/// Returns true iff shutdown() has been called
bool isShutdown() const {
return UNLIKELY(head_.load(std::memory_order_acquire).isShutdown());
}
/// Prevents blocking on this semaphore, causing all blocking wait()
/// calls to throw ShutdownSemError. Both currently blocked wait() and
/// future calls to wait() for which tryWait() would return false will
/// cause an exception. Calls to wait() for which the matching post()
/// has already occurred will proceed normally.
void shutdown() {
// first set the shutdown bit
auto h = head_.load(std::memory_order_acquire);
while (!h.isShutdown()) {
if (head_.compare_exchange_strong(h, h.withShutdown())) {
// success
h = h.withShutdown();
break;
}
// compare_exchange_strong rereads h, retry
}
// now wake up any waiters
while (h.isNodeIdx()) {
auto& node = idxToNode(h.idx());
auto repl = h.withPop(node.next);
if (head_.compare_exchange_strong(h, repl)) {
// successful pop, wake up the waiter and move on. The next
// field is used to convey that this wakeup didn't consume a value
node.setShutdownNotice();
node.handoff().post();
h = repl;
}
}
}
/// Returns true iff value was decremented
bool tryWait() {
uint32_t n = 1;
auto rv = decrOrPush(n, 0);
assert((rv == WaitResult::DECR && n == 0) ||
(rv != WaitResult::DECR && n == 1));
// SHUTDOWN is okay here, since we don't actually wait
return rv == WaitResult::DECR;
}
/// Equivalent to (but may be much more efficient than) n calls to
/// tryWait(). Returns the total amount by which the semaphore's value
/// was decreased
uint32_t tryWait(uint32_t n) {
auto const orig = n;
while (n > 0) {
#ifndef NDEBUG
auto prev = n;
#endif
auto rv = decrOrPush(n, 0);
assert((rv == WaitResult::DECR && n < prev) ||
(rv != WaitResult::DECR && n == prev));
if (rv != WaitResult::DECR) {
break;
}
}
return orig - n;
}
/// Blocks the current thread until there is a matching post or the
/// semaphore is shut down. Throws ShutdownSemError if the semaphore
/// has been shut down and this method would otherwise be blocking.
/// Note that wait() doesn't throw during shutdown if tryWait() would
/// return true
void wait() {
// early check isn't required for correctness, but is an important
// perf win if we can avoid allocating and deallocating a node
if (tryWait()) {
return;
}
// allocateNode() won't compile unless Handoff has a default
// constructor
UniquePtr node = allocateNode();
auto rv = tryWaitOrPush(*node);
if (UNLIKELY(rv == WaitResult::SHUTDOWN)) {
assert(isShutdown());
throw ShutdownSemError("wait() would block but semaphore is shut down");
}
if (rv == WaitResult::PUSH) {
node->handoff().wait();
if (UNLIKELY(node->isShutdownNotice())) {
// this wait() didn't consume a value, it was triggered by shutdown
assert(isShutdown());
throw ShutdownSemError(
"blocking wait() interrupted by semaphore shutdown");
}
// node->handoff().wait() can't return until after the node has
// been popped and post()ed, so it is okay for the UniquePtr to
// recycle the node now
}
// else node wasn't pushed, so it is safe to recycle
}
/// Returns a guess at the current value, designed for debugging.
/// If there are no concurrent posters or waiters then this will
/// be correct
uint32_t valueGuess() const {
// this is actually linearizable, but we don't promise that because
// we may want to add striping in the future to help under heavy
// contention
auto h = head_.load(std::memory_order_acquire);
return h.isNodeIdx() ? 0 : h.value();
}
protected:
enum class WaitResult {
PUSH,
DECR,
SHUTDOWN,
};
/// The type of a std::unique_ptr that will automatically return a
/// LifoSemNode to the appropriate IndexedMemPool
typedef std::unique_ptr<LifoSemNode<Handoff, Atom>,
LifoSemNodeRecycler<Handoff, Atom>> UniquePtr;
/// Returns a node that can be passed to decrOrLink
template <typename... Args>
UniquePtr allocateNode(Args&&... args) {
auto idx = LifoSemRawNode<Atom>::pool().allocIndex();
if (idx != 0) {
auto& node = idxToNode(idx);
node.clearShutdownNotice();
try {
node.init(std::forward<Args>(args)...);
} catch (...) {
LifoSemRawNode<Atom>::pool().recycleIndex(idx);
throw;
}
return UniquePtr(&node);
} else {
return UniquePtr();
}
}
/// Returns DECR if the semaphore value was decremented (and waiterNode
/// was untouched), PUSH if a reference to the wait node was pushed,
/// or SHUTDOWN if decrement was not possible and push wasn't allowed
/// because isShutdown(). Ownership of the wait node remains the
/// responsibility of the caller, who must not release it until after
/// the node's Handoff has been posted.
WaitResult tryWaitOrPush(LifoSemNode<Handoff, Atom>& waiterNode) {
uint32_t n = 1;
return decrOrPush(n, nodeToIdx(waiterNode));
}
private:
FOLLY_ALIGN_TO_AVOID_FALSE_SHARING
folly::AtomicStruct<LifoSemHead,Atom> head_;
char padding_[folly::detail::CacheLocality::kFalseSharingRange -
sizeof(LifoSemHead)];
static LifoSemNode<Handoff, Atom>& idxToNode(uint32_t idx) {
auto raw = &LifoSemRawNode<Atom>::pool()[idx];
return *static_cast<LifoSemNode<Handoff, Atom>*>(raw);
}
static uint32_t nodeToIdx(const LifoSemNode<Handoff, Atom>& node) {
return LifoSemRawNode<Atom>::pool().locateElem(&node);
}
/// Either increments by n and returns 0, or pops a node and returns it.
/// If n + the stripe's value overflows, then the stripe's value
/// saturates silently at 2^32-1
uint32_t incrOrPop(uint32_t n) {
while (true) {
assert(n > 0);
auto head = head_.load(std::memory_order_acquire);
if (head.isNodeIdx()) {
auto& node = idxToNode(head.idx());
if (head_.compare_exchange_strong(head, head.withPop(node.next))) {
// successful pop
return head.idx();
}
} else {
auto after = head.withValueIncr(n);
if (head_.compare_exchange_strong(head, after)) {
// successful incr
return 0;
}
}
// retry
}
}
/// Returns DECR if some amount was decremented, with that amount
/// subtracted from n. If n is 1 and this function returns DECR then n
/// must be 0 afterward. Returns PUSH if no value could be decremented
/// and idx was pushed, or if idx was zero and no push was performed but
/// a push would have been performed with a valid node. Returns SHUTDOWN
/// if the caller should have blocked but isShutdown(). If idx == 0,
/// may return PUSH even after isShutdown() or may return SHUTDOWN
WaitResult decrOrPush(uint32_t& n, uint32_t idx) {
assert(n > 0);
while (true) {
auto head = head_.load(std::memory_order_acquire);
if (!head.isNodeIdx() && head.value() > 0) {
// decr
auto delta = std::min(n, head.value());
if (head_.compare_exchange_strong(head, head.withValueDecr(delta))) {
n -= delta;
return WaitResult::DECR;
}
} else {
// push
if (idx == 0) {
return WaitResult::PUSH;
}
if (UNLIKELY(head.isShutdown())) {
return WaitResult::SHUTDOWN;
}
auto& node = idxToNode(idx);
node.next = head.isNodeIdx() ? head.idx() : 0;
if (head_.compare_exchange_strong(head, head.withPush(idx))) {
// push succeeded
return WaitResult::PUSH;
}
}
}
// retry
}
};
} // namespace detail
template <template<typename> class Atom, class BatonType>
struct LifoSemImpl : public detail::LifoSemBase<BatonType, Atom> {
constexpr explicit LifoSemImpl(uint32_t v = 0)
: detail::LifoSemBase<BatonType, Atom>(v) {}
};
} // namespace folly

35
ios/Pods/Folly/folly/Likely.h generated Normal file
View File

@ -0,0 +1,35 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Compiler hints to indicate the fast path of an "if" branch: whether
* the if condition is likely to be true or false.
*
* @author Tudor Bosman (tudorb@fb.com)
*/
#pragma once
#undef LIKELY
#undef UNLIKELY
#if defined(__GNUC__) && __GNUC__ >= 4
#define LIKELY(x) (__builtin_expect((x), 1))
#define UNLIKELY(x) (__builtin_expect((x), 0))
#else
#define LIKELY(x) (x)
#define UNLIKELY(x) (x)
#endif
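/*
 * A brief usage sketch (not part of the original header): mark the branch
 * that is expected to be cold so the compiler favors the fall-through path.
 *
 *   if (UNLIKELY(buf == nullptr)) {
 *     throw std::bad_alloc();
 *   }
 *   // the common case continues here on the fast path
 */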

640
ios/Pods/Folly/folly/LockTraits.h generated Normal file
View File

@ -0,0 +1,640 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This module provides a traits class for describing properties about mutex
* classes.
*
* This is a primitive for building higher-level abstractions that can work
* with a variety of mutex classes. For instance, this allows
* folly::Synchronized to support a number of different mutex types.
*/
#pragma once
#include <chrono>
#include <type_traits>
// Android, OSX, and Cygwin don't have timed mutexes
#if defined(ANDROID) || defined(__ANDROID__) || defined(__APPLE__) || \
defined(__CYGWIN__)
#define FOLLY_LOCK_TRAITS_HAVE_TIMED_MUTEXES 0
#else
#define FOLLY_LOCK_TRAITS_HAVE_TIMED_MUTEXES 1
#endif
namespace folly {
namespace detail {
/**
* An enum to describe the "level" of a mutex. The supported levels are
* Unique - a normal mutex that supports only exclusive locking
* Shared - a shared mutex which has shared locking and unlocking functions;
* Upgrade - a mutex that has all the methods of the two above along with
* support for upgradable locking
*/
enum class MutexLevel { UNIQUE, SHARED, UPGRADE };
/**
* A template dispatch mechanism that is used to determine the level of the
 * mutex based on its interface, as decided by LockInterfaceDispatcher.
*/
template <bool is_unique, bool is_shared, bool is_upgrade>
struct MutexLevelValueImpl;
template <>
struct MutexLevelValueImpl<true, false, false> {
static constexpr MutexLevel value = MutexLevel::UNIQUE;
};
template <>
struct MutexLevelValueImpl<true, true, false> {
static constexpr MutexLevel value = MutexLevel::SHARED;
};
template <>
struct MutexLevelValueImpl<true, true, true> {
static constexpr MutexLevel value = MutexLevel::UPGRADE;
};
/**
* An internal helper class to help identify the interface supported by the
* mutex. This is used in conjunction with the above MutexLevel
* specializations and the LockTraitsImpl to determine what functions are
* supported by objects of type Mutex
*
 * The implementation uses SFINAE in the return value with trailing return
* types to figure out what level a mutex is
*/
template <class Mutex>
class LockInterfaceDispatcher {
private:
// assert that the mutex type has basic lock and unlock functions
static_assert(
std::is_same<decltype(std::declval<Mutex>().lock()), void>::value,
"The mutex type must support lock and unlock functions");
// Helper functions for implementing the traits using SFINAE
template <class T>
static auto timed_lock_test(T*) -> typename std::is_same<
decltype(std::declval<T>().try_lock_for(std::chrono::milliseconds(0))),
bool>::type;
template <class T>
static std::false_type timed_lock_test(...);
template <class T>
static auto lock_shared_test(T*) -> typename std::
is_same<decltype(std::declval<T>().lock_shared()), void>::type;
template <class T>
static std::false_type lock_shared_test(...);
template <class T>
static auto lock_upgrade_test(T*) -> typename std::
is_same<decltype(std::declval<T>().lock_upgrade()), void>::type;
template <class T>
static std::false_type lock_upgrade_test(...);
public:
static constexpr bool has_lock_unique = true;
static constexpr bool has_lock_timed =
decltype(timed_lock_test<Mutex>(0))::value;
static constexpr bool has_lock_shared =
decltype(lock_shared_test<Mutex>(0))::value;
static constexpr bool has_lock_upgrade =
decltype(lock_upgrade_test<Mutex>(0))::value;
};
/**
 * LockTraitsImpl is the base that is used to describe the interface used by
* different mutex types. It accepts a MutexLevel argument and a boolean to
* show whether the mutex is a timed mutex or not. The implementations are
* partially specialized and inherit from the other implementations to get
* similar functionality
*/
template <class Mutex, MutexLevel level, bool is_timed>
struct LockTraitsImpl;
template <class Mutex>
struct LockTraitsImpl<Mutex, MutexLevel::UNIQUE, false> {
static constexpr bool is_timed{false};
static constexpr bool is_shared{false};
static constexpr bool is_upgrade{false};
/**
* Acquire the lock exclusively.
*/
static void lock(Mutex& mutex) {
mutex.lock();
}
/**
* Release an exclusively-held lock.
*/
static void unlock(Mutex& mutex) {
mutex.unlock();
}
};
/**
* Higher level mutexes have all the capabilities of the lower levels so
* inherit
*/
template <class Mutex>
struct LockTraitsImpl<Mutex, MutexLevel::SHARED, false>
: public LockTraitsImpl<Mutex, MutexLevel::UNIQUE, false> {
static constexpr bool is_timed{false};
static constexpr bool is_shared{true};
static constexpr bool is_upgrade{false};
/**
* Acquire the lock in shared (read) mode.
*/
static void lock_shared(Mutex& mutex) {
mutex.lock_shared();
}
/**
* Release a lock held in shared mode.
*/
static void unlock_shared(Mutex& mutex) {
mutex.unlock_shared();
}
};
/**
 * The following methods are supported (in addition to those of the shared level):
*
* m.lock_upgrade()
* m.unlock_upgrade()
*
* m.unlock_upgrade_and_lock()
*
* m.unlock_and_lock_upgrade()
* m.unlock_and_lock_shared()
* m.unlock_upgrade_and_lock_shared()
*
* m.try_lock_upgrade_for(rel_time)
* m.try_unlock_upgrade_and_lock_for(rel_time)
*
* Upgrading a shared lock is likely to deadlock when there is more than one
* thread performing an upgrade. This applies both to upgrading a shared lock
* to an upgrade lock and to upgrading a shared lock to a unique lock.
*
* Therefore, none of the following methods is supported:
* unlock_shared_and_lock_upgrade
* unlock_shared_and_lock
* try_unlock_shared_and_lock_upgrade
* try_unlock_shared_and_lock
* try_unlock_shared_and_lock_upgrade_for
* try_unlock_shared_and_lock_for
*/
template <class Mutex>
struct LockTraitsImpl<Mutex, MutexLevel::UPGRADE, false>
: public LockTraitsImpl<Mutex, MutexLevel::SHARED, false> {
static constexpr bool is_timed{false};
static constexpr bool is_shared{true};
static constexpr bool is_upgrade{true};
/**
* Acquire the lock in upgradable mode.
*/
static void lock_upgrade(Mutex& mutex) {
mutex.lock_upgrade();
}
/**
* Release the lock in upgrade mode
*/
static void unlock_upgrade(Mutex& mutex) {
mutex.unlock_upgrade();
}
/**
* Upgrade from an upgradable state to an exclusive state
*/
static void unlock_upgrade_and_lock(Mutex& mutex) {
mutex.unlock_upgrade_and_lock();
}
/**
* Downgrade from an exclusive state to an upgrade state
*/
static void unlock_and_lock_upgrade(Mutex& mutex) {
mutex.unlock_and_lock_upgrade();
}
/**
* Downgrade from an exclusive state to a shared state
*/
static void unlock_and_lock_shared(Mutex& mutex) {
mutex.unlock_and_lock_shared();
}
/**
* Downgrade from an upgrade state to a shared state
*/
static void unlock_upgrade_and_lock_shared(Mutex& mutex) {
mutex.unlock_upgrade_and_lock_shared();
}
};
template <class Mutex>
struct LockTraitsImpl<Mutex, MutexLevel::UNIQUE, true>
: public LockTraitsImpl<Mutex, MutexLevel::UNIQUE, false> {
static constexpr bool is_timed{true};
static constexpr bool is_shared{false};
static constexpr bool is_upgrade{false};
/**
* Acquire the lock exclusively, with a timeout.
*
* Returns true or false indicating if the lock was acquired or not.
*/
template <class Rep, class Period>
static bool try_lock_for(
Mutex& mutex,
const std::chrono::duration<Rep, Period>& timeout) {
return mutex.try_lock_for(timeout);
}
};
/**
* Note that there is no deadly diamond here because all the structs only have
* static functions and static bools which are going to be overridden by the
* lowest level implementation
*/
template <class Mutex>
struct LockTraitsImpl<Mutex, MutexLevel::SHARED, true>
: public LockTraitsImpl<Mutex, MutexLevel::SHARED, false>,
public LockTraitsImpl<Mutex, MutexLevel::UNIQUE, true> {
static constexpr bool is_timed{true};
static constexpr bool is_shared{true};
static constexpr bool is_upgrade{false};
/**
* Acquire the lock exclusively, with a timeout.
*
* Returns true or false indicating if the lock was acquired or not.
*/
template <class Rep, class Period>
static bool try_lock_for(
Mutex& mutex,
const std::chrono::duration<Rep, Period>& timeout) {
return mutex.try_lock_for(timeout);
}
/**
* Acquire the lock in shared (read) mode, with a timeout.
*
* Returns true or false indicating if the lock was acquired or not.
*/
template <class Rep, class Period>
static bool try_lock_shared_for(
Mutex& mutex,
const std::chrono::duration<Rep, Period>& timeout) {
return mutex.try_lock_shared_for(timeout);
}
};
template <class Mutex>
struct LockTraitsImpl<Mutex, MutexLevel::UPGRADE, true>
: public LockTraitsImpl<Mutex, MutexLevel::UPGRADE, false>,
public LockTraitsImpl<Mutex, MutexLevel::SHARED, true> {
static constexpr bool is_timed{true};
static constexpr bool is_shared{true};
static constexpr bool is_upgrade{true};
/**
* Acquire the lock in upgrade mode with a timeout
*
* Returns true or false indicating whether the lock was acquired or not
*/
template <class Rep, class Period>
static bool try_lock_upgrade_for(
Mutex& mutex,
const std::chrono::duration<Rep, Period>& timeout) {
return mutex.try_lock_upgrade_for(timeout);
}
/**
* Try to upgrade from an upgradable state to an exclusive state.
*
* Returns true or false indicating whether the lock was acquired or not
*/
template <class Rep, class Period>
static bool try_unlock_upgrade_and_lock_for(
Mutex& mutex,
const std::chrono::duration<Rep, Period>& timeout) {
return mutex.try_unlock_upgrade_and_lock_for(timeout);
}
};
} // detail
/**
* LockTraits describes details about a particular mutex type.
*
* The default implementation automatically attempts to detect traits
* based on the presence of various member functions.
*
* You can specialize LockTraits to provide custom behavior for lock
* classes that do not use the standard method names
* (lock()/unlock()/lock_shared()/unlock_shared()/try_lock_for())
*
*
 * LockTraits contains the following member variables:
* - static constexpr bool is_shared
* True if the lock supports separate shared vs exclusive locking states.
* - static constexpr bool is_timed
* True if the lock supports acquiring the lock with a timeout.
* - static constexpr bool is_upgrade
* True if the lock supports an upgradable state
*
* The following static methods always exist:
* - lock(Mutex& mutex)
* - unlock(Mutex& mutex)
*
* The following static methods may exist, depending on is_shared, is_timed
* and is_upgrade:
* - lock_shared()
*
* - try_lock_for()
* - try_lock_shared_for()
*
* - lock_upgrade()
* - unlock_upgrade_and_lock()
* - unlock_and_lock_upgrade()
* - unlock_and_lock_shared()
* - unlock_upgrade_and_lock_shared()
*
* - try_lock_upgrade_for()
* - try_unlock_upgrade_and_lock_for()
*
* - unlock_shared()
* - unlock_upgrade()
*/
/**
* Decoupling LockTraits and LockTraitsBase so that if people want to fully
* specialize LockTraits then they can inherit from LockTraitsBase instead
* of LockTraits with all the same goodies :)
*/
template <class Mutex>
struct LockTraitsBase
: public detail::LockTraitsImpl<
Mutex,
detail::MutexLevelValueImpl<
detail::LockInterfaceDispatcher<Mutex>::has_lock_unique,
detail::LockInterfaceDispatcher<Mutex>::has_lock_shared,
detail::LockInterfaceDispatcher<Mutex>::has_lock_upgrade>::value,
detail::LockInterfaceDispatcher<Mutex>::has_lock_timed> {};
template <class Mutex>
struct LockTraits : public LockTraitsBase<Mutex> {};
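/*
 * A small illustration (not part of the original header) of what the default
 * detection yields; the two standard mutex types named here are assumptions
 * on the caller's side and require a platform with timed mutexes.
 *
 *   static_assert(!folly::LockTraits<std::mutex>::is_shared, "");
 *   static_assert(!folly::LockTraits<std::mutex>::is_timed, "");
 *   static_assert(folly::LockTraits<std::shared_timed_mutex>::is_shared, "");
 *   static_assert(folly::LockTraits<std::shared_timed_mutex>::is_timed, "");
 */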
/**
* If the lock is a shared lock, acquire it in shared mode.
* Otherwise, for plain (exclusive-only) locks, perform a normal acquire.
*/
template <class Mutex>
typename std::enable_if<LockTraits<Mutex>::is_shared>::type
lock_shared_or_unique(Mutex& mutex) {
LockTraits<Mutex>::lock_shared(mutex);
}
template <class Mutex>
typename std::enable_if<!LockTraits<Mutex>::is_shared>::type
lock_shared_or_unique(Mutex& mutex) {
LockTraits<Mutex>::lock(mutex);
}
/**
* If the lock is a shared lock, try to acquire it in shared mode, for up to
* the given timeout. Otherwise, for plain (exclusive-only) locks, try to
* perform a normal acquire.
*
* Returns true if the lock was acquired, or false on time out.
*/
template <class Mutex, class Rep, class Period>
typename std::enable_if<LockTraits<Mutex>::is_shared, bool>::type
try_lock_shared_or_unique_for(
Mutex& mutex,
const std::chrono::duration<Rep, Period>& timeout) {
return LockTraits<Mutex>::try_lock_shared_for(mutex, timeout);
}
template <class Mutex, class Rep, class Period>
typename std::enable_if<!LockTraits<Mutex>::is_shared, bool>::type
try_lock_shared_or_unique_for(
Mutex& mutex,
const std::chrono::duration<Rep, Period>& timeout) {
return LockTraits<Mutex>::try_lock_for(mutex, timeout);
}
/**
* Release a lock acquired with lock_shared_or_unique()
*/
template <class Mutex>
typename std::enable_if<LockTraits<Mutex>::is_shared>::type
unlock_shared_or_unique(Mutex& mutex) {
LockTraits<Mutex>::unlock_shared(mutex);
}
template <class Mutex>
typename std::enable_if<!LockTraits<Mutex>::is_shared>::type
unlock_shared_or_unique(Mutex& mutex) {
LockTraits<Mutex>::unlock(mutex);
}
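/*
 * A small generic-use sketch (not part of the original header): the same
 * function body works for an exclusive-only mutex and for a shared mutex,
 * because the helpers dispatch on LockTraits<Mutex>::is_shared.
 *
 *   template <class Mutex>
 *   int readCounter(Mutex& m, const int& counter) {
 *     folly::lock_shared_or_unique(m);   // shared if supported, else exclusive
 *     int v = counter;
 *     folly::unlock_shared_or_unique(m);
 *     return v;
 *   }
 */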
/*
* Lock policy classes.
*
* These can be used as template parameters to provide compile-time
* selection over the type of lock operation to perform.
*/
/**
* A lock policy that performs exclusive lock operations.
*/
struct LockPolicyExclusive {
template <class Mutex>
static void lock(Mutex& mutex) {
LockTraits<Mutex>::lock(mutex);
}
template <class Mutex, class Rep, class Period>
static bool try_lock_for(
Mutex& mutex,
const std::chrono::duration<Rep, Period>& timeout) {
return LockTraits<Mutex>::try_lock_for(mutex, timeout);
}
template <class Mutex>
static void unlock(Mutex& mutex) {
LockTraits<Mutex>::unlock(mutex);
}
};
/**
* A lock policy that performs shared lock operations.
* This policy only works with shared mutex types.
*/
struct LockPolicyShared {
template <class Mutex>
static void lock(Mutex& mutex) {
LockTraits<Mutex>::lock_shared(mutex);
}
template <class Mutex, class Rep, class Period>
static bool try_lock_for(
Mutex& mutex,
const std::chrono::duration<Rep, Period>& timeout) {
return LockTraits<Mutex>::try_lock_shared_for(mutex, timeout);
}
template <class Mutex>
static void unlock(Mutex& mutex) {
LockTraits<Mutex>::unlock_shared(mutex);
}
};
/**
* A lock policy that performs a shared lock operation if a shared mutex type
* is given, or a normal exclusive lock operation on non-shared mutex types.
*/
struct LockPolicyShareable {
template <class Mutex>
static void lock(Mutex& mutex) {
lock_shared_or_unique(mutex);
}
template <class Mutex, class Rep, class Period>
static bool try_lock_for(
Mutex& mutex,
const std::chrono::duration<Rep, Period>& timeout) {
return try_lock_shared_or_unique_for(mutex, timeout);
}
template <class Mutex>
static void unlock(Mutex& mutex) {
unlock_shared_or_unique(mutex);
}
};
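/*
 * A minimal sketch (not part of the original header) of using a policy as a
 * compile-time parameter; PolicyGuard is a hypothetical helper, not part of
 * folly.
 *
 *   template <class Mutex, class Policy>
 *   struct PolicyGuard {
 *     explicit PolicyGuard(Mutex& m) : m_(m) { Policy::lock(m_); }
 *     ~PolicyGuard() { Policy::unlock(m_); }
 *     Mutex& m_;
 *   };
 *
 *   // shared (read) locking when the mutex supports it:
 *   // PolicyGuard<std::shared_timed_mutex, folly::LockPolicyShared> g(mtx);
 */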
/**
* A lock policy with the following mapping
*
* lock() -> lock_upgrade()
* unlock() -> unlock_upgrade()
* try_lock_for -> try_lock_upgrade_for()
*/
struct LockPolicyUpgrade {
template <class Mutex>
static void lock(Mutex& mutex) {
LockTraits<Mutex>::lock_upgrade(mutex);
}
template <class Mutex, class Rep, class Period>
static bool try_lock_for(
Mutex& mutex,
const std::chrono::duration<Rep, Period>& timeout) {
return LockTraits<Mutex>::try_lock_upgrade_for(mutex, timeout);
}
template <class Mutex>
static void unlock(Mutex& mutex) {
LockTraits<Mutex>::unlock_upgrade(mutex);
}
};
/*****************************************************************************
* Policies for all the transitions from possible mutex levels
****************************************************************************/
/**
* A lock policy with the following mapping
*
* lock() -> unlock_upgrade_and_lock()
* unlock() -> unlock()
* try_lock_for -> try_unlock_upgrade_and_lock_for()
*/
struct LockPolicyFromUpgradeToExclusive : public LockPolicyExclusive {
template <class Mutex>
static void lock(Mutex& mutex) {
LockTraits<Mutex>::unlock_upgrade_and_lock(mutex);
}
template <class Mutex, class Rep, class Period>
static bool try_lock_for(
Mutex& mutex,
const std::chrono::duration<Rep, Period>& timeout) {
return LockTraits<Mutex>::try_unlock_upgrade_and_lock_for(mutex, timeout);
}
};
/**
* A lock policy with the following mapping
*
* lock() -> unlock_and_lock_upgrade()
* unlock() -> unlock_upgrade()
* try_lock_for -> unlock_and_lock_upgrade()
*/
struct LockPolicyFromExclusiveToUpgrade : public LockPolicyUpgrade {
template <class Mutex>
static void lock(Mutex& mutex) {
LockTraits<Mutex>::unlock_and_lock_upgrade(mutex);
}
template <class Mutex, class Rep, class Period>
static bool try_lock_for(
Mutex& mutex,
const std::chrono::duration<Rep, Period>&) {
LockTraits<Mutex>::unlock_and_lock_upgrade(mutex);
// downgrade should be non blocking and should succeed
return true;
}
};
/**
* A lock policy with the following mapping
*
* lock() -> unlock_upgrade_and_lock_shared()
* unlock() -> unlock_shared()
* try_lock_for -> unlock_upgrade_and_lock_shared()
*/
struct LockPolicyFromUpgradeToShared : public LockPolicyShared {
template <class Mutex>
static void lock(Mutex& mutex) {
LockTraits<Mutex>::unlock_upgrade_and_lock_shared(mutex);
}
template <class Mutex, class Rep, class Period>
static bool try_lock_for(
Mutex& mutex,
const std::chrono::duration<Rep, Period>&) {
LockTraits<Mutex>::unlock_upgrade_and_lock_shared(mutex);
// downgrade should be non blocking and should succeed
return true;
}
};
/**
* A lock policy with the following mapping
*
* lock() -> unlock_and_lock_shared()
* unlock() -> unlock_shared()
* try_lock_for() -> unlock_and_lock_shared()
*/
struct LockPolicyFromExclusiveToShared : public LockPolicyShared {
template <class Mutex>
static void lock(Mutex& mutex) {
LockTraits<Mutex>::unlock_and_lock_shared(mutex);
}
template <class Mutex, class Rep, class Period>
static bool try_lock_for(
Mutex& mutex,
const std::chrono::duration<Rep, Period>&) {
LockTraits<Mutex>::unlock_and_lock_shared(mutex);
// downgrade should be non blocking and should succeed
return true;
}
};
} // folly

101
ios/Pods/Folly/folly/LockTraitsBoost.h generated Normal file
View File

@ -0,0 +1,101 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file contains LockTraits specializations for boost mutex types.
*
 * These need to be specialized simply because the timed methods take
 * boost::chrono arguments instead of std::chrono ones.
*/
#pragma once
#include <boost/thread.hpp>
#include <folly/LockTraits.h>
#if FOLLY_LOCK_TRAITS_HAVE_TIMED_MUTEXES
namespace folly {
namespace detail {
/// Convert a std::chrono::duration argument to boost::chrono::duration
template <class Rep, std::intmax_t Num, std::intmax_t Denom>
boost::chrono::duration<Rep, boost::ratio<Num, Denom>> toBoostDuration(
const std::chrono::duration<Rep, std::ratio<Num, Denom>>& d) {
return boost::chrono::duration<Rep, boost::ratio<Num, Denom>>(d.count());
}
}
/**
* LockTraits specialization for boost::shared_mutex
*/
template <>
struct LockTraits<boost::shared_mutex>
: public LockTraitsBase<boost::shared_mutex> {
static constexpr bool is_shared = true;
static constexpr bool is_timed = true;
template <class Rep, class Period>
static bool try_lock_for(
boost::shared_mutex& mutex,
const std::chrono::duration<Rep, Period>& timeout) {
return mutex.try_lock_for(detail::toBoostDuration(timeout));
}
template <class Rep, class Period>
static bool try_lock_shared_for(
boost::shared_mutex& mutex,
const std::chrono::duration<Rep, Period>& timeout) {
return mutex.try_lock_shared_for(detail::toBoostDuration(timeout));
}
};
/**
* LockTraits specialization for boost::timed_mutex
*/
template <>
struct LockTraits<boost::timed_mutex>
: public LockTraitsBase<boost::timed_mutex> {
static constexpr bool is_shared = false;
static constexpr bool is_timed = true;
template <class Rep, class Period>
static bool try_lock_for(
boost::timed_mutex& mutex,
const std::chrono::duration<Rep, Period>& timeout) {
return mutex.try_lock_for(detail::toBoostDuration(timeout));
}
};
/**
* LockTraits specialization for boost::recursive_timed_mutex
*/
template <>
struct LockTraits<boost::recursive_timed_mutex>
: public LockTraitsBase<boost::recursive_timed_mutex> {
static constexpr bool is_shared = false;
static constexpr bool is_timed = true;
template <class Rep, class Period>
static bool try_lock_for(
boost::recursive_timed_mutex& mutex,
const std::chrono::duration<Rep, Period>& timeout) {
return mutex.try_lock_for(detail::toBoostDuration(timeout));
}
};
} // folly
#endif // FOLLY_LOCK_TRAITS_HAVE_TIMED_MUTEXES
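/*
 * A minimal sketch (not part of the original header) of the effect of these
 * specializations: the timeout below is a std::chrono duration and is
 * converted to boost::chrono internally by detail::toBoostDuration().
 *
 *   boost::shared_mutex m;
 *   if (folly::LockTraits<boost::shared_mutex>::try_lock_shared_for(
 *           m, std::chrono::milliseconds(10))) {
 *     // ... read-side critical section ...
 *     folly::LockTraits<boost::shared_mutex>::unlock_shared(m);
 *   }
 */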

55
ios/Pods/Folly/folly/Logging.h generated Normal file
View File

@ -0,0 +1,55 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <atomic>
#include <chrono>
#include <glog/logging.h>
#ifndef FB_LOG_EVERY_MS
/**
* Issues a LOG(severity) no more often than every
* milliseconds. Example:
*
* FB_LOG_EVERY_MS(INFO, 10000) << "At least ten seconds passed"
* " since you last saw this.";
*
* The implementation uses for statements to introduce variables in
 * a nice way that doesn't interfere with surrounding statements. It is thread
* safe. Non-positive intervals will always log.
*/
#define FB_LOG_EVERY_MS(severity, milli_interval) \
for (decltype(milli_interval) FB_LEM_once = 1, \
FB_LEM_interval = (milli_interval); \
FB_LEM_once; ) \
for (::std::chrono::milliseconds::rep FB_LEM_prev, FB_LEM_now = \
FB_LEM_interval <= 0 ? 0 : \
::std::chrono::duration_cast< ::std::chrono::milliseconds>( \
::std::chrono::system_clock::now().time_since_epoch() \
).count(); \
FB_LEM_once; ) \
for (static ::std::atomic< ::std::chrono::milliseconds::rep> \
FB_LEM_hist; FB_LEM_once; FB_LEM_once = 0) \
if (FB_LEM_interval > 0 && \
(FB_LEM_now - (FB_LEM_prev = \
FB_LEM_hist.load(std::memory_order_acquire)) < \
FB_LEM_interval || \
!FB_LEM_hist.compare_exchange_strong(FB_LEM_prev,FB_LEM_now))) {\
} else \
LOG(severity)
#endif

287
ios/Pods/Folly/folly/MPMCPipeline.h generated Normal file
View File

@ -0,0 +1,287 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <utility>
#include <glog/logging.h>
#include <folly/detail/MPMCPipelineDetail.h>
namespace folly {
/**
* Helper tag template to use amplification > 1
*/
template <class T, size_t Amp> class MPMCPipelineStage;
/**
* Multi-Producer, Multi-Consumer pipeline.
*
* A N-stage pipeline is a combination of N+1 MPMC queues (see MPMCQueue.h).
*
* At each stage, you may dequeue the results from the previous stage (possibly
* from multiple threads) and enqueue results to the next stage. Regardless of
* the order of completion, data is delivered to the next stage in the original
* order. Each input is matched with a "ticket" which must be produced
* when enqueueing to the next stage.
*
* A given stage must produce exactly K ("amplification factor", default K=1)
* results for every input. This is enforced by requiring that each ticket
* is used exactly K times.
*
* Usage:
*
* // arguments are queue sizes
* MPMCPipeline<int, std::string, int> pipeline(10, 10, 10);
*
* pipeline.blockingWrite(42);
*
* {
* int val;
* auto ticket = pipeline.blockingReadStage<0>(val);
* pipeline.blockingWriteStage<0>(ticket, folly::to<std::string>(val));
* }
*
* {
* std::string val;
* auto ticket = pipeline.blockingReadStage<1>(val);
* int ival = 0;
* try {
* ival = folly::to<int>(val);
* } catch (...) {
* // We must produce exactly 1 output even on exception!
* }
* pipeline.blockingWriteStage<1>(ticket, ival);
* }
*
* int result;
* pipeline.blockingRead(result);
* // result == 42
*
* To specify amplification factors greater than 1, use
* MPMCPipelineStage<T, amplification> instead of T in the declaration:
*
* MPMCPipeline<int,
* MPMCPipelineStage<std::string, 2>,
* MPMCPipelineStage<int, 4>>
*
* declares a two-stage pipeline: the first stage produces 2 strings
* for each input int, the second stage produces 4 ints for each input string,
* so, overall, the pipeline produces 2*4 = 8 ints for each input int.
*
* Implementation details: we use N+1 MPMCQueue objects; each intermediate
* queue connects two adjacent stages. The MPMCQueue implementation is abused;
* instead of using it as a queue, we insert in the output queue at the
* position determined by the input queue's popTicket_. We guarantee that
* all slots are filled (and therefore the queue doesn't freeze) because
* we require that each step produces exactly K outputs for every input.
*/
template <class In, class... Stages> class MPMCPipeline {
typedef std::tuple<detail::PipelineStageInfo<Stages>...> StageInfos;
typedef std::tuple<
detail::MPMCPipelineStageImpl<In>,
detail::MPMCPipelineStageImpl<
typename detail::PipelineStageInfo<Stages>::value_type>...>
StageTuple;
static constexpr size_t kAmplification =
detail::AmplificationProduct<StageInfos>::value;
public:
/**
* Ticket, returned by blockingReadStage, must be given back to
* blockingWriteStage. Tickets are not thread-safe.
*/
template <size_t Stage>
class Ticket {
public:
~Ticket() noexcept {
CHECK_EQ(remainingUses_, 0) << "All tickets must be completely used!";
}
#ifndef NDEBUG
Ticket() noexcept
: owner_(nullptr),
remainingUses_(0),
value_(0xdeadbeeffaceb00c) {
}
#else
Ticket() noexcept : remainingUses_(0) { }
#endif
Ticket(Ticket&& other) noexcept
:
#ifndef NDEBUG
owner_(other.owner_),
#endif
remainingUses_(other.remainingUses_),
value_(other.value_) {
other.remainingUses_ = 0;
#ifndef NDEBUG
other.owner_ = nullptr;
other.value_ = 0xdeadbeeffaceb00c;
#endif
}
Ticket& operator=(Ticket&& other) noexcept {
if (this != &other) {
this->~Ticket();
new (this) Ticket(std::move(other));
}
return *this;
}
private:
friend class MPMCPipeline;
#ifndef NDEBUG
MPMCPipeline* owner_;
#endif
size_t remainingUses_;
uint64_t value_;
Ticket(MPMCPipeline* owner, size_t amplification, uint64_t value) noexcept
:
#ifndef NDEBUG
owner_(owner),
#endif
remainingUses_(amplification),
value_(value * amplification) {
(void)owner; // -Wunused-parameter
}
uint64_t use(MPMCPipeline* owner) {
CHECK_GT(remainingUses_--, 0);
#ifndef NDEBUG
CHECK(owner == owner_);
#else
(void)owner; // -Wunused-parameter
#endif
return value_++;
}
};
/**
* Default-construct pipeline. Useful to move-assign later,
* just like MPMCQueue, see MPMCQueue.h for more details.
*/
MPMCPipeline() = default;
/**
* Construct a pipeline with N+1 queue sizes.
*/
template <class... Sizes>
explicit MPMCPipeline(Sizes... sizes) : stages_(sizes...) { }
/**
* Push an element into (the first stage of) the pipeline. Blocking.
*/
template <class... Args>
void blockingWrite(Args&&... args) {
std::get<0>(stages_).blockingWrite(std::forward<Args>(args)...);
}
/**
* Try to push an element into (the first stage of) the pipeline.
* Non-blocking.
*/
template <class... Args>
bool write(Args&&... args) {
return std::get<0>(stages_).write(std::forward<Args>(args)...);
}
/**
* Read an element for stage Stage and obtain a ticket. Blocking.
*/
template <size_t Stage>
Ticket<Stage> blockingReadStage(
typename std::tuple_element<Stage, StageTuple>::type::value_type& elem) {
return Ticket<Stage>(
this,
std::tuple_element<Stage, StageInfos>::type::kAmplification,
std::get<Stage>(stages_).blockingRead(elem));
}
/**
* Try to read an element for stage Stage and obtain a ticket.
* Non-blocking.
*/
template <size_t Stage>
bool readStage(
Ticket<Stage>& ticket,
typename std::tuple_element<Stage, StageTuple>::type::value_type& elem) {
uint64_t tval;
if (!std::get<Stage>(stages_).readAndGetTicket(tval, elem)) {
return false;
}
ticket = Ticket<Stage>(
this,
std::tuple_element<Stage, StageInfos>::type::kAmplification,
tval);
return true;
}
/**
* Complete an element in stage Stage (pushing it for stage Stage+1).
* Blocking.
*/
template <size_t Stage, class... Args>
void blockingWriteStage(Ticket<Stage>& ticket, Args&&... args) {
std::get<Stage+1>(stages_).blockingWriteWithTicket(
ticket.use(this),
std::forward<Args>(args)...);
}
/**
* Pop an element from (the final stage of) the pipeline. Blocking.
*/
void blockingRead(
typename std::tuple_element<
sizeof...(Stages),
StageTuple>::type::value_type& elem) {
std::get<sizeof...(Stages)>(stages_).blockingRead(elem);
}
/**
* Try to pop an element from (the final stage of) the pipeline.
* Non-blocking.
*/
bool read(
typename std::tuple_element<
sizeof...(Stages),
StageTuple>::type::value_type& elem) {
return std::get<sizeof...(Stages)>(stages_).read(elem);
}
/**
* Estimate queue size, measured as values from the last stage.
* (so if the pipeline has an amplification factor > 1, pushing an element
* into the first stage will cause sizeGuess() to be == amplification factor)
* Elements "in flight" (currently processed as part of a stage, so not
* in any queue) are also counted.
*/
ssize_t sizeGuess() const noexcept {
return (std::get<0>(stages_).writeCount() * kAmplification -
std::get<sizeof...(Stages)>(stages_).readCount());
}
private:
StageTuple stages_;
};
} // namespaces

1384
ios/Pods/Folly/folly/MPMCQueue.h generated Normal file

File diff suppressed because it is too large

232
ios/Pods/Folly/folly/MacAddress.h generated Normal file
View File

@ -0,0 +1,232 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <iosfwd>
#include <folly/Bits.h>
#include <folly/Range.h>
namespace folly {
class IPAddressV6;
/*
* MacAddress represents an IEEE 802 MAC address.
*/
class MacAddress {
public:
static constexpr size_t SIZE = 6;
static const MacAddress BROADCAST;
static const MacAddress ZERO;
/*
* Construct a zero-initialized MacAddress.
*/
MacAddress() {
memset(&bytes_, 0, 8);
}
/*
* Parse a MacAddress from a human-readable string.
* The string must contain 6 one- or two-digit hexadecimal
* numbers, separated by dashes or colons.
* Examples: 00:02:C9:C8:F9:68 or 0-2-c9-c8-f9-68
*/
explicit MacAddress(StringPiece str);
/*
* Construct a MAC address from its 6-byte binary value
*/
static MacAddress fromBinary(ByteRange value) {
MacAddress ret;
ret.setFromBinary(value);
return ret;
}
/*
* Construct a MacAddress from a uint64_t in network byte order.
*
* The first two bytes are ignored, and the MAC address is taken from the
* latter 6 bytes.
*
* This is a static method rather than a constructor to avoid confusion
* between host and network byte order constructors.
*/
static MacAddress fromNBO(uint64_t value) {
return MacAddress(value);
}
/*
* Construct a MacAddress from a uint64_t in host byte order.
*
* The most significant two bytes are ignored, and the MAC address is taken
* from the least significant 6 bytes.
*
* This is a static method rather than a constructor to avoid confusion
* between host and network byte order constructors.
*/
static MacAddress fromHBO(uint64_t value) {
return MacAddress(Endian::big(value));
}
/*
* Construct the multicast MacAddress for the specified multicast IPv6
* address.
*/
static MacAddress createMulticast(IPAddressV6 addr);
/*
* Get a pointer to the MAC address' binary value.
*
* The returned value points to internal storage inside the MacAddress
* object. It is only valid as long as the MacAddress, and its contents may
* change if the MacAddress is updated.
*/
const uint8_t* bytes() const {
return bytes_ + 2;
}
/*
* Return the address as a uint64_t, in network byte order.
*
* The first two bytes will be 0, and the subsequent 6 bytes will contain
* the address in network byte order.
*/
uint64_t u64NBO() const {
return packedBytes();
}
/*
* Return the address as a uint64_t, in host byte order.
*
* The two most significant bytes will be 0, and the remaining 6 bytes will
* contain the address. The most significant of these 6 bytes will contain
 * the first byte that appears on the wire, and the least significant byte
* will contain the last byte.
*/
uint64_t u64HBO() const {
// Endian::big() does what we want here, even though we are converting
// from big-endian to host byte order. This swaps if and only if
// the host byte order is little endian.
return Endian::big(packedBytes());
}
/*
* Return a human-readable representation of the MAC address.
*/
std::string toString() const;
/*
* Update the current MacAddress object from a human-readable string.
*/
void parse(StringPiece str);
/*
* Update the current MacAddress object from a 6-byte binary representation.
*/
void setFromBinary(ByteRange value);
bool isBroadcast() const {
return *this == BROADCAST;
}
bool isMulticast() const {
return getByte(0) & 0x1;
}
bool isUnicast() const {
return !isMulticast();
}
/*
* Return true if this MAC address is locally administered.
*
* Locally administered addresses are assigned by the local network
* administrator, and are not guaranteed to be globally unique. (It is
* similar to IPv4's private address space.)
*
* Note that isLocallyAdministered() will return true for the broadcast
* address, since it has the locally administered bit set.
*/
bool isLocallyAdministered() const {
return getByte(0) & 0x2;
}
// Comparison operators.
bool operator==(const MacAddress& other) const {
// All constructors and modifying methods make sure padding is 0,
// so we don't need to mask these bytes out when comparing here.
return packedBytes() == other.packedBytes();
}
bool operator<(const MacAddress& other) const {
return u64HBO() < other.u64HBO();
}
bool operator!=(const MacAddress& other) const {
return !(*this == other);
}
bool operator>(const MacAddress& other) const {
return other < *this;
}
bool operator>=(const MacAddress& other) const {
return !(*this < other);
}
bool operator<=(const MacAddress& other) const {
return !(*this > other);
}
private:
explicit MacAddress(uint64_t valueNBO) {
memcpy(&bytes_, &valueNBO, 8);
// Set the pad bytes to 0.
// This allows us to easily compare two MacAddresses,
// without having to worry about differences in the padding.
bytes_[0] = 0;
bytes_[1] = 0;
}
/* We store the 6 bytes starting at bytes_[2] (most significant)
through bytes_[7] (least).
bytes_[0] and bytes_[1] are always equal to 0 to simplify comparisons.
*/
unsigned char bytes_[8];
inline uint64_t getByte(size_t index) const {
return bytes_[index + 2];
}
uint64_t packedBytes() const {
uint64_t u64;
memcpy(&u64, bytes_, 8);
return u64;
}
};
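/*
 * A short usage sketch (not part of the original header); the address value
 * is arbitrary and the expected results are shown as comments.
 *
 *   folly::MacAddress mac("00:02:c9:c8:f9:68");
 *   uint64_t hbo = mac.u64HBO();      // 0x0002c9c8f968
 *   bool mcast = mac.isMulticast();   // false: low bit of the first byte is 0
 *   std::string s = mac.toString();   // human-readable form of the address
 */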
/* Define toAppend() so to<string> will work */
template <class Tgt>
typename std::enable_if<IsSomeString<Tgt>::value>::type
toAppend(MacAddress address, Tgt* result) {
toAppend(address.toString(), result);
}
std::ostream& operator<<(std::ostream& os, MacAddress address);
} // folly

67
ios/Pods/Folly/folly/MallctlHelper.h generated Normal file
View File

@ -0,0 +1,67 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Some helper functions for mallctl.
#pragma once
#include <folly/Likely.h>
#include <folly/Malloc.h>
#include <stdexcept>
namespace folly {
namespace detail {
void handleMallctlError(const char* cmd, int err);
template <typename T>
void mallctlHelper(const char* cmd, T* out, T* in) {
if (UNLIKELY(!usingJEMalloc())) {
throw std::logic_error("Calling mallctl when not using jemalloc.");
}
size_t outLen = sizeof(T);
int err = mallctl(cmd, out, out ? &outLen : nullptr, in, in ? sizeof(T) : 0);
if (UNLIKELY(err != 0)) {
handleMallctlError(cmd, err);
}
}
} // detail
template <typename T>
void mallctlRead(const char* cmd, T* out) {
detail::mallctlHelper(cmd, out, static_cast<T*>(nullptr));
}
template <typename T>
void mallctlWrite(const char* cmd, T in) {
detail::mallctlHelper(cmd, static_cast<T*>(nullptr), &in);
}
template <typename T>
void mallctlReadWrite(const char* cmd, T* out, T in) {
detail::mallctlHelper(cmd, out, &in);
}
inline void mallctlCall(const char* cmd) {
// Use <unsigned> rather than <void> to avoid sizeof(void).
mallctlRead<unsigned>(cmd, nullptr);
}
} // folly
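
A hedged sketch of how these wrappers are typically used to query jemalloc. It only does useful work when the binary is actually linked against jemalloc built with stats support (an iOS Pods build normally is not); "epoch" and "stats.allocated" are standard jemalloc mallctl names, not anything defined in this header.

#include <folly/MallctlHelper.h>
#include <folly/Malloc.h>
#include <cstddef>
#include <cstdint>
#include <iostream>

int main() {
  // The helpers throw std::logic_error unless jemalloc is actually in use.
  if (!folly::usingJEMalloc()) {
    std::cout << "not running under jemalloc\n";
    return 0;
  }
  // Refresh jemalloc's cached statistics, then read a counter.
  uint64_t epoch = 1;
  folly::mallctlReadWrite("epoch", &epoch, epoch);   // "epoch": uint64_t in/out
  size_t allocated = 0;
  folly::mallctlRead("stats.allocated", &allocated); // "stats.allocated": size_t
  std::cout << "bytes allocated: " << allocated << "\n";
  return 0;
}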

296
ios/Pods/Folly/folly/Malloc.h generated Normal file
View File

@ -0,0 +1,296 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Functions to provide smarter use of jemalloc, if jemalloc is being used.
// http://www.canonware.com/download/jemalloc/jemalloc-latest/doc/jemalloc.html
#pragma once
/**
* Define various MALLOCX_* macros normally provided by jemalloc. We define
* them so that we don't have to include jemalloc.h, in case the program is
* built without jemalloc support.
*/
#ifndef MALLOCX_LG_ALIGN
#define MALLOCX_LG_ALIGN(la) (la)
#endif
#ifndef MALLOCX_ZERO
#define MALLOCX_ZERO (static_cast<int>(0x40))
#endif
// If using fbstring from libstdc++ (see comment in FBString.h), then
// just define stub code here to typedef the fbstring type into the
// folly namespace.
// This provides backwards compatibility for code that explicitly
// includes and uses fbstring.
#if defined(_GLIBCXX_USE_FB) && !defined(_LIBSTDCXX_FBSTRING)
#include <folly/detail/Malloc.h>
#include <folly/portability/BitsFunctexcept.h>
#include <string>
namespace folly {
using std::goodMallocSize;
using std::jemallocMinInPlaceExpandable;
using std::usingJEMalloc;
using std::smartRealloc;
using std::checkedMalloc;
using std::checkedCalloc;
using std::checkedRealloc;
}
#else // !defined(_GLIBCXX_USE_FB) || defined(_LIBSTDCXX_FBSTRING)
#ifdef _LIBSTDCXX_FBSTRING
#pragma GCC system_header
/**
* Declare *allocx() and mallctl*() as weak symbols. These will be provided by
* jemalloc if we are using jemalloc, or will be NULL if we are using another
* malloc implementation.
*/
extern "C" void* mallocx(size_t, int)
__attribute__((__weak__));
extern "C" void* rallocx(void*, size_t, int)
__attribute__((__weak__));
extern "C" size_t xallocx(void*, size_t, size_t, int)
__attribute__((__weak__));
extern "C" size_t sallocx(const void*, int)
__attribute__((__weak__));
extern "C" void dallocx(void*, int)
__attribute__((__weak__));
extern "C" void sdallocx(void*, size_t, int)
__attribute__((__weak__));
extern "C" size_t nallocx(size_t, int)
__attribute__((__weak__));
extern "C" int mallctl(const char*, void*, size_t*, void*, size_t)
__attribute__((__weak__));
extern "C" int mallctlnametomib(const char*, size_t*, size_t*)
__attribute__((__weak__));
extern "C" int mallctlbymib(const size_t*, size_t, void*, size_t*, void*,
size_t)
__attribute__((__weak__));
#include <bits/functexcept.h>
#define FOLLY_HAVE_MALLOC_H 1
#else // !defined(_LIBSTDCXX_FBSTRING)
#include <folly/detail/Malloc.h> /* nolint */
#include <folly/portability/BitsFunctexcept.h> /* nolint */
#endif
// for malloc_usable_size
// NOTE: FreeBSD 9 doesn't have malloc.h. Its definitions
// are found in stdlib.h.
#if FOLLY_HAVE_MALLOC_H
#include <malloc.h>
#else
#include <stdlib.h>
#endif
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <atomic>
#include <new>
#ifdef _LIBSTDCXX_FBSTRING
namespace std _GLIBCXX_VISIBILITY(default) {
_GLIBCXX_BEGIN_NAMESPACE_VERSION
#else
namespace folly {
#endif
// Cannot depend on Portability.h when _LIBSTDCXX_FBSTRING.
#if defined(__GNUC__)
#define FOLLY_MALLOC_NOINLINE __attribute__((__noinline__))
#if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) >= 40900
// This is for checked malloc-like functions (returns non-null pointer
// which cannot alias any outstanding pointer).
#define FOLLY_MALLOC_CHECKED_MALLOC \
__attribute__((__returns_nonnull__, __malloc__))
#else
#define FOLLY_MALLOC_CHECKED_MALLOC __attribute__((__malloc__))
#endif
#else
#define FOLLY_MALLOC_NOINLINE
#define FOLLY_MALLOC_CHECKED_MALLOC
#endif
/**
* Determine if we are using jemalloc or not.
*/
FOLLY_MALLOC_NOINLINE inline bool usingJEMalloc() noexcept {
// Checking for rallocx != NULL is not sufficient; we may be in a dlopen()ed
// module that depends on libjemalloc, so rallocx is resolved, but the main
// program might be using a different memory allocator.
// How do we determine that we're using jemalloc? In the hackiest
// way possible. We allocate memory using malloc() and see if the
// per-thread counter of allocated memory increases. This makes me
// feel dirty inside. Also note that this requires jemalloc to have
// been compiled with --enable-stats.
static const bool result = [] () noexcept {
// Some platforms (*cough* OSX *cough*) require weak symbol checks to be
// in the form if (mallctl != nullptr). Not if (mallctl) or if (!mallctl)
// (!!). http://goo.gl/xpmctm
if (mallocx == nullptr || rallocx == nullptr || xallocx == nullptr
|| sallocx == nullptr || dallocx == nullptr || sdallocx == nullptr
|| nallocx == nullptr || mallctl == nullptr
|| mallctlnametomib == nullptr || mallctlbymib == nullptr) {
return false;
}
// "volatile" because gcc optimizes out the reads from *counter, because
// it "knows" malloc doesn't modify global state...
/* nolint */ volatile uint64_t* counter;
size_t counterLen = sizeof(uint64_t*);
if (mallctl("thread.allocatedp", static_cast<void*>(&counter), &counterLen,
nullptr, 0) != 0) {
return false;
}
if (counterLen != sizeof(uint64_t*)) {
return false;
}
uint64_t origAllocated = *counter;
// Static because otherwise clever compilers will find out that
// the ptr is not used and does not escape the scope, so they will
// just optimize away the malloc.
static const void* ptr = malloc(1);
if (!ptr) {
// malloc(1) failed; treat this as "not using jemalloc".
return false;
}
return (origAllocated != *counter);
}();
return result;
}
inline size_t goodMallocSize(size_t minSize) noexcept {
if (minSize == 0) {
return 0;
}
if (!usingJEMalloc()) {
// Not using jemalloc - no smarts
return minSize;
}
return nallocx(minSize, 0);
}
// We always request "good" sizes for allocation, so jemalloc can
// never grow in place small blocks; they're already occupied to the
// brim. Blocks larger than or equal to 4096 bytes can in fact be
// expanded in place, and this constant reflects that.
static const size_t jemallocMinInPlaceExpandable = 4096;
/**
* Trivial wrappers around malloc, calloc, realloc that check for allocation
* failure and throw std::bad_alloc in that case.
*/
inline void* checkedMalloc(size_t size) {
void* p = malloc(size);
if (!p) std::__throw_bad_alloc();
return p;
}
inline void* checkedCalloc(size_t n, size_t size) {
void* p = calloc(n, size);
if (!p) std::__throw_bad_alloc();
return p;
}
inline void* checkedRealloc(void* ptr, size_t size) {
void* p = realloc(ptr, size);
if (!p) std::__throw_bad_alloc();
return p;
}
/**
* This function tries to reallocate a buffer of which only the first
* currentSize bytes are used. The problem with using realloc is that
* if currentSize is relatively small _and_ if realloc decides it
* needs to move the memory chunk to a new buffer, then realloc ends
* up copying data that is not used. It's impossible to hook into
* GNU's malloc to figure whether expansion will occur in-place or as
* a malloc-copy-free troika. (If an expand_in_place primitive would
* be available, smartRealloc would use it.) As things stand, this
* routine just tries to call realloc() (thus benefiting from potential
* copy-free coalescing) unless there's too much slack memory.
*/
FOLLY_MALLOC_CHECKED_MALLOC FOLLY_MALLOC_NOINLINE inline void* smartRealloc(
void* p,
const size_t currentSize,
const size_t currentCapacity,
const size_t newCapacity) {
assert(p);
assert(currentSize <= currentCapacity &&
currentCapacity < newCapacity);
if (usingJEMalloc()) {
// using jemalloc's API. Don't forget that jemalloc can never grow
// in place blocks smaller than 4096 bytes.
//
// NB: newCapacity may not be precisely equal to a jemalloc size class,
// i.e. newCapacity is not guaranteed to be the result of a
// goodMallocSize() call, therefore xallocx() may return more than
// newCapacity bytes of space. Use >= rather than == to check whether
// xallocx() successfully expanded in place.
if (currentCapacity >= jemallocMinInPlaceExpandable &&
xallocx(p, newCapacity, 0, 0) >= newCapacity) {
// Managed to expand in place
return p;
}
// Cannot expand; must move
auto const result = checkedMalloc(newCapacity);
std::memcpy(result, p, currentSize);
free(p);
return result;
}
// Not using jemalloc: choose between plain realloc and malloc-copy-free.
auto const slack = currentCapacity - currentSize;
if (slack * 2 > currentSize) {
// Too much slack, malloc-copy-free cycle:
auto const result = checkedMalloc(newCapacity);
std::memcpy(result, p, currentSize);
free(p);
return result;
}
// If there's not too much slack, we realloc in hope of coalescing
return checkedRealloc(p, newCapacity);
}
#ifdef _LIBSTDCXX_FBSTRING
_GLIBCXX_END_NAMESPACE_VERSION
#endif
} // folly
#endif // !defined(_GLIBCXX_USE_FB) || defined(_LIBSTDCXX_FBSTRING)
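
A small sketch of the grow-a-buffer pattern the helpers above support. The sizes are illustrative; when jemalloc is absent, goodMallocSize() is a no-op and smartRealloc() falls back to realloc or malloc-copy-free based on slack.

#include <folly/Malloc.h>
#include <cstdlib>
#include <cstring>

int main() {
  // Ask for a capacity the allocator can hand out without waste.
  size_t cap = folly::goodMallocSize(100);
  char* buf = static_cast<char*>(folly::checkedMalloc(cap)); // throws std::bad_alloc on failure
  std::memset(buf, 0, cap);

  // Grow the buffer: smartRealloc picks between in-place expansion (jemalloc),
  // plain realloc, and malloc+memcpy+free, depending on slack and the allocator.
  size_t used = 100;
  size_t newCap = folly::goodMallocSize(cap * 2);
  buf = static_cast<char*>(folly::smartRealloc(buf, used, cap, newCap));

  free(buf);
  return 0;
}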

151
ios/Pods/Folly/folly/MapUtil.h generated Normal file
View File

@ -0,0 +1,151 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/Conv.h>
#include <folly/Optional.h>
namespace folly {
/**
* Given a map and a key, return the value corresponding to the key in the map,
* or a given default value if the key doesn't exist in the map.
*/
template <class Map>
typename Map::mapped_type get_default(
const Map& map, const typename Map::key_type& key,
const typename Map::mapped_type& dflt =
typename Map::mapped_type()) {
auto pos = map.find(key);
return (pos != map.end() ? pos->second : dflt);
}
/**
* Given a map and a key, return the value corresponding to the key in the map,
* or a given default value if the key doesn't exist in the map.
*/
template <
class Map,
typename Func,
typename = typename std::enable_if<std::is_convertible<
typename std::result_of<Func()>::type,
typename Map::mapped_type>::value>::type>
typename Map::mapped_type
get_default(const Map& map, const typename Map::key_type& key, Func&& dflt) {
auto pos = map.find(key);
return pos != map.end() ? pos->second : dflt();
}
/**
* Given a map and a key, return the value corresponding to the key in the map,
* or throw an exception of the specified type.
*/
template <class E = std::out_of_range, class Map>
const typename Map::mapped_type& get_or_throw(
const Map& map,
const typename Map::key_type& key,
const std::string& exceptionStrPrefix = std::string()) {
auto pos = map.find(key);
if (pos != map.end()) {
return pos->second;
}
throw E(folly::to<std::string>(exceptionStrPrefix, key));
}
template <class E = std::out_of_range, class Map>
typename Map::mapped_type& get_or_throw(
Map& map,
const typename Map::key_type& key,
const std::string& exceptionStrPrefix = std::string()) {
auto pos = map.find(key);
if (pos != map.end()) {
return pos->second;
}
throw E(folly::to<std::string>(exceptionStrPrefix, key));
}
/**
* Given a map and a key, return an Optional<V> if the key exists, or
* folly::none if the key does not exist in the map.
*/
template <class Map>
folly::Optional<typename Map::mapped_type> get_optional(
const Map& map, const typename Map::key_type& key) {
auto pos = map.find(key);
if (pos != map.end()) {
return folly::Optional<typename Map::mapped_type>(pos->second);
} else {
return folly::none;
}
}
/**
* Given a map and a key, return a reference to the value corresponding to the
* key in the map, or the given default reference if the key doesn't exist in
* the map.
*/
template <class Map>
const typename Map::mapped_type& get_ref_default(
const Map& map, const typename Map::key_type& key,
const typename Map::mapped_type& dflt) {
auto pos = map.find(key);
return (pos != map.end() ? pos->second : dflt);
}
/**
* Given a map and a key, return a reference to the value corresponding to the
* key in the map, or the given default reference if the key doesn't exist in
* the map.
*/
template <
class Map,
typename Func,
typename = typename std::enable_if<std::is_convertible<
typename std::result_of<Func()>::type,
const typename Map::mapped_type&>::value>::type,
typename = typename std::enable_if<
std::is_reference<typename std::result_of<Func()>::type>::value>::type>
const typename Map::mapped_type& get_ref_default(
const Map& map,
const typename Map::key_type& key,
Func&& dflt) {
auto pos = map.find(key);
return (pos != map.end() ? pos->second : dflt());
}
/**
* Given a map and a key, return a pointer to the value corresponding to the
* key in the map, or nullptr if the key doesn't exist in the map.
*/
template <class Map>
const typename Map::mapped_type* get_ptr(
const Map& map, const typename Map::key_type& key) {
auto pos = map.find(key);
return (pos != map.end() ? &pos->second : nullptr);
}
/**
* Non-const overload of the above.
*/
template <class Map>
typename Map::mapped_type* get_ptr(
Map& map, const typename Map::key_type& key) {
auto pos = map.find(key);
return (pos != map.end() ? &pos->second : nullptr);
}
} // namespace folly
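
A brief sketch exercising the lookup helpers above against a std::map; the map contents are illustrative.

#include <folly/MapUtil.h>
#include <iostream>
#include <map>
#include <string>

int main() {
  std::map<std::string, int> counts{{"apple", 3}, {"pear", 5}};

  // Value-or-default lookup; unlike operator[], this never inserts.
  int apples = folly::get_default(counts, "apple", 0);  // 3
  int plums  = folly::get_default(counts, "plum", -1);  // -1 (missing key)

  // Optional-returning lookup.
  auto maybePear = folly::get_optional(counts, "pear");
  if (maybePear) {
    std::cout << "pear: " << *maybePear << "\n";
  }

  // Pointer-returning lookup allows in-place mutation when the key exists.
  if (int* p = folly::get_ptr(counts, "apple")) {
    ++*p;
  }

  std::cout << apples << " " << plums << " " << counts["apple"] << "\n";
  return 0;
}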

201
ios/Pods/Folly/folly/Math.h generated Normal file
View File

@ -0,0 +1,201 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Some arithmetic functions that seem to pop up or get hand-rolled a lot.
* So far they are all focused on integer division.
*/
#pragma once
#include <stdint.h>
#include <limits>
#include <type_traits>
namespace folly {
namespace detail {
template <typename T>
inline constexpr T divFloorBranchless(T num, T denom) {
// floor != trunc when the answer isn't exact and truncation went the
// wrong way (truncation went toward positive infinity). That happens
// when the true answer is negative, which happens when num and denom
// have different signs. The following code compiles branch-free on
// many platforms.
return (num / denom) +
((num % denom) != 0 ? 1 : 0) *
(std::is_signed<T>::value && (num ^ denom) < 0 ? -1 : 0);
}
template <typename T>
inline constexpr T divFloorBranchful(T num, T denom) {
// First case handles negative result by preconditioning numerator.
// Preconditioning decreases the magnitude of the numerator, which is
// itself sign-dependent. Second case handles zero or positive rational
// result, where trunc and floor are the same.
return std::is_signed<T>::value && (num ^ denom) < 0 && num != 0
? (num + (num > 0 ? -1 : 1)) / denom - 1
: num / denom;
}
template <typename T>
inline constexpr T divCeilBranchless(T num, T denom) {
// ceil != trunc when the answer isn't exact (truncation occurred)
// and truncation went away from positive infinity. That happens when
// the true answer is positive, which happens when num and denom have
// the same sign.
return (num / denom) +
((num % denom) != 0 ? 1 : 0) *
(std::is_signed<T>::value && (num ^ denom) < 0 ? 0 : 1);
}
template <typename T>
inline constexpr T divCeilBranchful(T num, T denom) {
// First case handles negative or zero rational result, where trunc and ceil
// are the same.
// Second case handles positive result by preconditioning numerator.
// Preconditioning decreases the magnitude of the numerator, which is
// itself sign-dependent.
return (std::is_signed<T>::value && (num ^ denom) < 0) || num == 0
? num / denom
: (num + (num > 0 ? -1 : 1)) / denom + 1;
}
template <typename T>
inline constexpr T divRoundAwayBranchless(T num, T denom) {
// away != trunc whenever truncation actually occurred, which is when
// there is a non-zero remainder. If the unrounded result is negative
// then fixup moves it toward negative infinity. If the unrounded
// result is positive then adjustment makes it larger.
return (num / denom) +
((num % denom) != 0 ? 1 : 0) *
(std::is_signed<T>::value && (num ^ denom) < 0 ? -1 : 1);
}
template <typename T>
inline constexpr T divRoundAwayBranchful(T num, T denom) {
// First case of second ternary operator handles negative rational
// result, which is the same as divFloor. Second case of second ternary
// operator handles positive result, which is the same as divCeil.
// Zero case is separated for simplicity.
return num == 0 ? 0
: (num + (num > 0 ? -1 : 1)) / denom +
(std::is_signed<T>::value && (num ^ denom) < 0 ? -1 : 1);
}
template <typename N, typename D>
using IdivResultType = typename std::enable_if<
std::is_integral<N>::value && std::is_integral<D>::value &&
!std::is_same<N, bool>::value &&
!std::is_same<D, bool>::value,
decltype(N{1} / D{1})>::type;
}
#if defined(__arm__) && !FOLLY_A64
constexpr auto kIntegerDivisionGivesRemainder = false;
#else
constexpr auto kIntegerDivisionGivesRemainder = true;
#endif
/**
* Returns num/denom, rounded toward negative infinity. Put another way,
* returns the largest integral value that is less than or equal to the
* exact (not rounded) fraction num/denom.
*
* The matching remainder (num - divFloor(num, denom) * denom) can be
* negative only if denom is negative, unlike in truncating division.
* Note that for unsigned types this is the same as the normal integer
* division operator. divFloor is equivalent to python's integral division
* operator //.
*
* This function undergoes the same integer promotion rules as a
* built-in operator, except that we don't allow bool -> int promotion.
* This function is undefined if denom == 0. It is also undefined if the
* result type T is a signed type, num is std::numeric_limits<T>::min(),
* and denom is equal to -1 after conversion to the result type.
*/
template <typename N, typename D>
inline constexpr detail::IdivResultType<N, D> divFloor(N num, D denom) {
using R = decltype(num / denom);
return kIntegerDivisionGivesRemainder && std::is_signed<R>::value
? detail::divFloorBranchless<R>(num, denom)
: detail::divFloorBranchful<R>(num, denom);
}
/**
* Returns num/denom, rounded toward positive infinity. Put another way,
* returns the smallest integral value that is greater than or equal to
* the exact (not rounded) fraction num/denom.
*
* This function undergoes the same integer promotion rules as a
* built-in operator, except that we don't allow bool -> int promotion.
* This function is undefined if denom == 0. It is also undefined if the
* result type T is a signed type, num is std::numeric_limits<T>::min(),
* and denom is equal to -1 after conversion to the result type.
*/
template <typename N, typename D>
inline constexpr detail::IdivResultType<N, D> divCeil(N num, D denom) {
using R = decltype(num / denom);
return kIntegerDivisionGivesRemainder && std::is_signed<R>::value
? detail::divCeilBranchless<R>(num, denom)
: detail::divCeilBranchful<R>(num, denom);
}
/**
* Returns num/denom, rounded toward zero. If num and denom are non-zero
* and have different signs (so the unrounded fraction num/denom is
* negative), returns divCeil, otherwise returns divFloor. If T is an
* unsigned type then this is always equal to divFloor.
*
* Note that this is the same as the normal integer division operator,
* at least since C99 (before then the rounding for negative results was
* implementation defined). This function is here for completeness and
* as a place to hang this comment.
*
* This function undergoes the same integer promotion rules as a
* built-in operator, except that we don't allow bool -> int promotion.
* This function is undefined if denom == 0. It is also undefined if the
* result type T is a signed type, num is std::numeric_limits<T>::min(),
* and denom is equal to -1 after conversion to the result type.
*/
template <typename N, typename D>
inline constexpr detail::IdivResultType<N, D> divTrunc(N num, D denom) {
return num / denom;
}
/**
* Returns num/denom, rounded away from zero. If num and denom are
* non-zero and have different signs (so the unrounded fraction num/denom
* is negative), returns divFloor, otherwise returns divCeil. If T is
* an unsigned type then this is always equal to divCeil.
*
* This function undergoes the same integer promotion rules as a
* built-in operator, except that we don't allow bool -> int promotion.
* This function is undefined if denom == 0. It is also undefined if the
* result type T is a signed type, num is std::numeric_limits<T>::min(),
* and denom is equal to -1 after conversion to the result type.
*/
template <typename N, typename D>
inline constexpr detail::IdivResultType<N, D> divRoundAway(N num, D denom) {
using R = decltype(num / denom);
return kIntegerDivisionGivesRemainder && std::is_signed<R>::value
? detail::divRoundAwayBranchless<R>(num, denom)
: detail::divRoundAwayBranchful<R>(num, denom);
}
} // namespace folly
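
Worked values for the four rounding modes, as a compile-and-run sketch using only the functions above; -7/2 is exactly -3.5, which makes the differences visible.

#include <folly/Math.h>
#include <cassert>

int main() {
  // -7 / 2 is exactly -3.5; the four helpers round it four different ways.
  assert(folly::divFloor(-7, 2) == -4);      // toward negative infinity
  assert(folly::divCeil(-7, 2) == -3);       // toward positive infinity
  assert(folly::divTrunc(-7, 2) == -3);      // toward zero (same as built-in /)
  assert(folly::divRoundAway(-7, 2) == -4);  // away from zero

  // When num and denom have the same sign, floor agrees with trunc
  // and ceil agrees with round-away.
  assert(folly::divFloor(7, 2) == 3);
  assert(folly::divCeil(7, 2) == 4);
  return 0;
}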

Some files were not shown because too many files have changed in this diff.