Mirror of https://github.com/ecency/ecency-mobile.git (synced 2024-11-22 23:28:56 +03:00)

Merge pull request #890 from esteemapp/react-native-upgrade

Upgrading rn to v0.59.8

This commit is contained in: commit b75995f5c2
.flowconfig
@ -24,7 +24,6 @@
 [libs]
 node_modules/react-native/Libraries/react-native/react-native-interface.js
 node_modules/react-native/flow/
-node_modules/react-native/flow-github/

 [options]
 emoji=true
@ -67,4 +66,4 @@ suppress_comment=\\(.\\|\n\\)*\\$FlowFixedInNextDeploy
 suppress_comment=\\(.\\|\n\\)*\\$FlowExpectedError

 [version]
-^0.78.0
+^0.92.0
android/app/BUCK
@ -8,23 +8,13 @@
 # - `buck install -r android/app` - compile, install and run application
 #

+load(":build_defs.bzl", "create_aar_targets", "create_jar_targets")
+
 lib_deps = []

-for jarfile in glob(['libs/*.jar']):
-  name = 'jars__' + jarfile[jarfile.rindex('/') + 1: jarfile.rindex('.jar')]
-  lib_deps.append(':' + name)
-  prebuilt_jar(
-    name = name,
-    binary_jar = jarfile,
-  )
+create_aar_targets(glob(["libs/*.aar"]))

-for aarfile in glob(['libs/*.aar']):
-  name = 'aars__' + aarfile[aarfile.rindex('/') + 1: aarfile.rindex('.aar')]
-  lib_deps.append(':' + name)
-  android_prebuilt_aar(
-    name = name,
-    aar = aarfile,
-  )
+create_jar_targets(glob(["libs/*.jar"]))

 android_library(
     name = "all-libs",
android/app/build.gradle
@ -98,7 +98,11 @@ def enableProguardInReleaseBuilds = false

 android {
     compileSdkVersion rootProject.ext.compileSdkVersion
     buildToolsVersion rootProject.ext.buildToolsVersion

+    compileOptions {
+        sourceCompatibility JavaVersion.VERSION_1_8
+        targetCompatibility JavaVersion.VERSION_1_8
+    }
+
     defaultConfig {
         applicationId "app.esteem.mobile"
@ -106,9 +110,6 @@ android {
         targetSdkVersion rootProject.ext.targetSdkVersion
         versionCode versionMajor * 10000 + versionMinor * 100 + versionPatch
         versionName "${versionMajor}.${versionMinor}.${versionPatch}"
-        ndk {
-            abiFilters "armeabi-v7a", "x86"
-        }
         vectorDrawables.useSupportLibrary = true
     }
     splits {
@ -116,7 +117,7 @@ android {
             reset()
             enable enableSeparateBuildPerCPUArchitecture
             universalApk false  // If true, also generate a universal APK
-            include "armeabi-v7a", "x86"
+            include "armeabi-v7a", "x86", "arm64-v8a", "x86_64"
         }
     }
     buildTypes {
@ -130,7 +131,7 @@ android {
         variant.outputs.each { output ->
             // For each separate APK per architecture, set a unique version code as described here:
             // http://tools.android.com/tech-docs/new-build-system/user-guide/apk-splits
-            def versionCodes = ["armeabi-v7a":1, "x86":2]
+            def versionCodes = ["armeabi-v7a":1, "x86":2, "arm64-v8a": 3, "x86_64": 4]
             def abi = output.getFilter(OutputFile.ABI)
             if (abi != null) {  // null for the universal-debug, universal-release variants
                 output.versionCodeOverride =
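For context, the `versionCode` line above packs the app's semver into one integer, and the per-ABI map gives each of the four split APKs a distinct code. A minimal TypeScript sketch of that arithmetic; the override formula is an assumption (the hunk is truncated before it), patterned on the stock React Native template:

```typescript
// Sketch of the version-code scheme in android/app/build.gradle (assumptions noted).
const versionMajor = 2, versionMinor = 0, versionPatch = 3; // hypothetical app v2.0.3

// versionCode versionMajor * 10000 + versionMinor * 100 + versionPatch
const versionCode = versionMajor * 10000 + versionMinor * 100 + versionPatch;
console.log(versionCode); // 20003

// Per-ABI override: the RN template formula (assumed here) shifts an ABI index
// into the high bits so split APKs stay unique and consistently ordered.
const versionCodes: Record<string, number> =
    { "armeabi-v7a": 1, "x86": 2, "arm64-v8a": 3, "x86_64": 4 };
for (const [abi, index] of Object.entries(versionCodes)) {
  console.log(abi, index * 1048576 + versionCode); // e.g. arm64-v8a -> 3165731
}
```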
@ -147,21 +148,21 @@ configurations.all {
 }

 dependencies {
-    compile project(':react-native-version-number')
-    compile project(':react-native-code-push')
-    compile project(':realm')
-    compile project(':react-native-fast-image')
-    compile project(':react-native-image-crop-picker')
-    compile project(':appcenter-push')
-    compile project(':react-native-view-overflow')
-    compile project(':react-native-vector-icons')
-    compile project(':react-native-linear-gradient')
-    compile project(':react-native-config')
-    compile project(':appcenter-crashes')
-    compile project(':appcenter-analytics')
-    compile project(':appcenter')
-    compile 'com.facebook.fresco:animated-gif:1.10.0'
+    implementation project(':react-native-version-number')
+    implementation project(':react-native-code-push')
+    implementation project(':realm')
+    implementation project(':react-native-fast-image')
+    implementation project(':react-native-image-crop-picker')
+    implementation project(':appcenter-push')
+    implementation project(':react-native-view-overflow')
+    implementation project(':react-native-vector-icons')
+    implementation project(':react-native-linear-gradient')
+    implementation project(':react-native-config')
+    implementation project(':appcenter-crashes')
+    implementation project(':appcenter-analytics')
+    implementation project(':appcenter')
+    implementation 'com.microsoft.appcenter:appcenter-push:1.11.2'
+    implementation 'com.facebook.fresco:animated-gif:1.10.0'
     implementation fileTree(dir: "libs", include: ["*.jar"])
     implementation "com.android.support:appcompat-v7:${rootProject.ext.supportLibVersion}"
     implementation "com.facebook.react:react-native:+"  // From node_modules
android/app/build_defs.bzl (new file, 19 lines)
@ -0,0 +1,19 @@
"""Helper definitions to glob .aar and .jar targets"""

def create_aar_targets(aarfiles):
    for aarfile in aarfiles:
        name = "aars__" + aarfile[aarfile.rindex("/") + 1:aarfile.rindex(".aar")]
        lib_deps.append(":" + name)
        android_prebuilt_aar(
            name = name,
            aar = aarfile,
        )

def create_jar_targets(jarfiles):
    for jarfile in jarfiles:
        name = "jars__" + jarfile[jarfile.rindex("/") + 1:jarfile.rindex(".jar")]
        lib_deps.append(":" + name)
        prebuilt_jar(
            name = name,
            binary_jar = jarfile,
        )
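Each macro above derives a Buck target name by slicing the path between the final `/` and the extension; note that `lib_deps` is not defined in this file, it comes from the scope of the BUCK file that loads the macros. A small TypeScript sketch of just that name-mangling (file names hypothetical; Python's `rindex` maps to `lastIndexOf`):

```typescript
// Mirrors: "aars__" + aarfile[aarfile.rindex("/") + 1 : aarfile.rindex(".aar")]
function buckTargetName(prefix: string, file: string, ext: string): string {
  const base = file.slice(file.lastIndexOf("/") + 1, file.lastIndexOf(ext));
  return `${prefix}__${base}`;
}

console.log(buckTargetName("aars", "libs/some-lib.aar", ".aar")); // "aars__some-lib"
console.log(buckTargetName("jars", "libs/some-lib.jar", ".jar")); // "jars__some-lib"
```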
android/app/src/debug/AndroidManifest.xml (new file, 8 lines)
@ -0,0 +1,8 @@
<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
    xmlns:tools="http://schemas.android.com/tools">

    <uses-permission android:name="android.permission.SYSTEM_ALERT_WINDOW"/>

    <application android:usesCleartextTraffic="true" tools:targetApi="28" tools:ignore="GoogleAppIndexingWarning" />
</manifest>
android/app/src/main/AndroidManifest.xml
@ -2,7 +2,6 @@
     package="app.esteem.mobile">

     <uses-permission android:name="android.permission.INTERNET" />
-    <uses-permission android:name="android.permission.SYSTEM_ALERT_WINDOW"/>
     <uses-permission android:name="android.permission.CAMERA"/>
     <uses-feature android:name="android.hardware.camera" android:required="false" />
     <uses-feature android:name="android.hardware.camera.front" android:required="false" />
@ -10,6 +9,7 @@
         android:name=".MainApplication"
         android:label="@string/app_name"
         android:icon="@mipmap/ic_launcher"
+        android:roundIcon="@mipmap/ic_launcher_round"
         android:allowBackup="false"
         android:theme="@style/AppTheme">
         <activity
android/build.gradle
@ -14,18 +14,18 @@ def getNpmVersionArray() { // major [0], minor [1], patch [2]

 buildscript {
     ext {
-        buildToolsVersion = "27.0.3"
+        buildToolsVersion = "28.0.3"
         minSdkVersion = 16
-        compileSdkVersion = 27
-        targetSdkVersion = 26
-        supportLibVersion = "27.1.1"
+        compileSdkVersion = 28
+        targetSdkVersion = 28
+        supportLibVersion = "28.0.0"
     }
     repositories {
         google()
         jcenter()
     }
     dependencies {
-        classpath 'com.android.tools.build:gradle:3.1.4'
+        classpath 'com.android.tools.build:gradle:3.3.1'
         // Add this line
         classpath 'com.google.gms:google-services:4.0.2'
         // NOTE: Do not place your application dependencies here; they belong
@ -66,9 +66,3 @@ subprojects {project ->
         versionPatch = npmVersion[2]
     }
 }
-
-
-task wrapper(type: Wrapper) {
-    gradleVersion = '4.4'
-    distributionUrl = distributionUrl.replace("bin", "all")
-}
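The `getNpmVersionArray()` helper named in the hunk header is what feeds `versionMajor`/`versionMinor`/`versionPatch` into the app module's version-code arithmetic. A hedged TypeScript equivalent, assuming a standard semver `version` field in `package.json`:

```typescript
import { readFileSync } from "fs";

// major [0], minor [1], patch [2] -- mirrors the Gradle helper's comment.
function getNpmVersionArray(pkgPath = "package.json"): [number, number, number] {
  const { version } = JSON.parse(readFileSync(pkgPath, "utf8"));
  const [major, minor, patch] = version.split(".").map(Number);
  return [major, minor, patch];
}
```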
android/gradle/wrapper/gradle-wrapper.properties
@ -2,4 +2,4 @@ distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
 zipStoreBase=GRADLE_USER_HOME
 zipStorePath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-4.4-all.zip
+distributionUrl=https\://services.gradle.org/distributions/gradle-4.10.2-all.zip
babel.config.js (new file, 3 lines)
@ -0,0 +1,3 @@
module.exports = {
  presets: ['module:metro-react-native-babel-preset'],
};
ios/Podfile
@ -30,16 +30,14 @@ target 'eSteem' do
   pod 'glog', :podspec => '../node_modules/react-native/third-party-podspecs/glog.podspec'
   pod 'Folly', :podspec => '../node_modules/react-native/third-party-podspecs/Folly.podspec'

-  pod 'RNImageCropPicker', :path => '../node_modules/react-native-image-crop-picker'
-  pod 'CodePush', :path => '../node_modules/react-native-code-push'
-
   pod 'react-native-fast-image', :path => '../node_modules/react-native-fast-image'
-
+  pod 'CodePush', :path => '../node_modules/react-native-code-push'
+  pod 'RNImageCropPicker', :path => '../node_modules/react-native-image-crop-picker'

   pod 'react-native-version-number', :path => '../node_modules/react-native-version-number'


 target 'eSteemTests' do
   inherit! :search_paths
   # Pods for testing
ios/Podfile.lock
@ -8,25 +8,31 @@ PODS:
     - AppCenter/Core
   - AppCenterReactNativeShared (1.13.0):
     - AppCenter/Core (= 1.14.0)
   - Base64 (1.1.2)
   - boost-for-react-native (1.63.0)
-  - CodePush (5.6.0):
-    - Base64 (~> 1.1)
-    - JWT (~> 3.0.0-beta.7)
+  - CodePush (1000.0.0):
+    - CodePush/Base64 (= 1000.0.0)
+    - CodePush/Core (= 1000.0.0)
+    - CodePush/JWT (= 1000.0.0)
+    - CodePush/SSZipArchive (= 1000.0.0)
     - React
+  - CodePush/Base64 (1000.0.0):
+    - React
+  - CodePush/Core (1000.0.0):
+    - React
+  - CodePush/JWT (1000.0.0):
+    - React
+  - CodePush/SSZipArchive (1000.0.0):
+    - React
+    - SSZipArchive (~> 2.1)
   - DoubleConversion (1.1.6)
   - FLAnimatedImage (1.0.12)
-  - Folly (2016.10.31.00):
+  - Folly (2018.10.22.00):
     - boost-for-react-native
     - DoubleConversion
     - glog
   - glog (0.3.5)
   - JWT (3.0.0-beta.11):
     - Base64 (~> 1.1.2)
   - QBImagePickerController (3.4.0)
-  - React (0.57.8):
-    - React/Core (= 0.57.8)
+  - React (0.59.8):
+    - React/Core (= 0.59.8)
   - react-native-fast-image (4.0.14):
     - FLAnimatedImage
     - React
@ -34,43 +40,51 @@ PODS:
     - SDWebImage/GIF
   - react-native-version-number (0.3.6):
     - React
-  - React/Core (0.57.8):
-    - yoga (= 0.57.8.React)
-  - React/CxxBridge (0.57.8):
-    - Folly (= 2016.10.31.00)
+  - React/Core (0.59.8):
+    - yoga (= 0.59.8.React)
+  - React/CxxBridge (0.59.8):
+    - Folly (= 2018.10.22.00)
     - React/Core
     - React/cxxreact
-  - React/cxxreact (0.57.8):
+  - React/cxxreact (0.59.8):
     - boost-for-react-native (= 1.63.0)
-    - Folly (= 2016.10.31.00)
-    - React/jschelpers
+    - DoubleConversion
+    - Folly (= 2018.10.22.00)
+    - glog
+    - React/jsiexecutor
     - React/jsinspector
-  - React/DevSupport (0.57.8):
+  - React/DevSupport (0.59.8):
     - React/Core
     - React/RCTWebSocket
-  - React/fishhook (0.57.8)
-  - React/jschelpers (0.57.8):
-    - Folly (= 2016.10.31.00)
-    - React/PrivateDatabase
-  - React/jsinspector (0.57.8)
-  - React/PrivateDatabase (0.57.8)
-  - React/RCTActionSheet (0.57.8):
+  - React/fishhook (0.59.8)
+  - React/jsi (0.59.8):
+    - DoubleConversion
+    - Folly (= 2018.10.22.00)
+    - glog
+  - React/jsiexecutor (0.59.8):
+    - DoubleConversion
+    - Folly (= 2018.10.22.00)
+    - glog
+    - React/cxxreact
+    - React/jsi
+  - React/jsinspector (0.59.8)
+  - React/RCTActionSheet (0.59.8):
     - React/Core
-  - React/RCTAnimation (0.57.8):
+  - React/RCTAnimation (0.59.8):
     - React/Core
-  - React/RCTBlob (0.57.8):
+  - React/RCTBlob (0.59.8):
     - React/Core
-  - React/RCTCameraRoll (0.57.8):
+  - React/RCTCameraRoll (0.59.8):
     - React/Core
     - React/RCTImage
-  - React/RCTImage (0.57.8):
+  - React/RCTImage (0.59.8):
     - React/Core
     - React/RCTNetwork
-  - React/RCTNetwork (0.57.8):
+  - React/RCTNetwork (0.59.8):
     - React/Core
-  - React/RCTText (0.57.8):
+  - React/RCTText (0.59.8):
     - React/Core
-  - React/RCTWebSocket (0.57.8):
+  - React/RCTWebSocket (0.59.8):
     - React/Core
     - React/fishhook
     - React/RCTBlob
@ -83,8 +97,7 @@ PODS:
   - SDWebImage/GIF (4.4.6):
     - FLAnimatedImage (~> 1.0)
     - SDWebImage/Core
   - SSZipArchive (2.2.2)
-  - yoga (0.57.8.React)
+  - yoga (0.59.8.React)

 DEPENDENCIES:
   - AppCenter/Analytics (~> 1.14.0)
@ -114,14 +127,11 @@ SPEC REPOS:
 https://github.com/cocoapods/specs.git:
   - AppCenter
   - AppCenterReactNativeShared
   - Base64
   - boost-for-react-native
   - FLAnimatedImage
   - JWT
   - QBImagePickerController
   - RSKImageCropper
   - SDWebImage
   - SSZipArchive

 EXTERNAL SOURCES:
   CodePush:
@ -146,24 +156,21 @@ EXTERNAL SOURCES:
 SPEC CHECKSUMS:
   AppCenter: 02c5efe08d3bbab5421ae41d5f7aa7238906b817
   AppCenterReactNativeShared: 00f470c3c17b47718db8733cbe811ccdcfd86282
   Base64: cecfb41a004124895a7bcee567a89bae5a89d49b
   boost-for-react-native: 39c7adb57c4e60d6c5479dd8623128eb5b3f0f2c
-  CodePush: 300e90b3481fa82569090406165b83745de3613a
+  CodePush: a503ca0caee269e68d8faaafe4414990ec282520
   DoubleConversion: bb338842f62ab1d708ceb63ec3d999f0f3d98ecd
   FLAnimatedImage: 4a0b56255d9b05f18b6dd7ee06871be5d3b89e31
-  Folly: c89ac2d5c6ab169cd7397ef27485c44f35f742c7
-  glog: e8acf0ebbf99759d3ff18c86c292a5898282dcde
+  Folly: de497beb10f102453a1afa9edbf8cf8a251890de
+  glog: aefd1eb5dda2ab95ba0938556f34b98e2da3a60d
   JWT: 05028b9624591bba9681bb57df2c5f95fa258bad
   QBImagePickerController: d54cf93db6decf26baf6ed3472f336ef35cae022
-  React: adbac0757ce35e92fbd447ab98c810209d27d9b0
+  React: 76e6aa2b87d05eb6cccb6926d72685c9a07df152
   react-native-fast-image: 6d50167ad4d68b59640ceead8c2bc4e58d91d8bd
   react-native-version-number: b415bbec6a13f2df62bf978e85bc0d699462f37f
   RNImageCropPicker: 754299983d07c450aec0834c0efe0ed114131ff3
   RSKImageCropper: 98296ad26b41753f796b6898d015509598f13d97
   SDWebImage: 3f3f0c02f09798048c47a5ed0a13f17b063572d8
   SSZipArchive: fa16b8cc4cdeceb698e5e5d9f67e9558532fbf23
-  yoga: 74cdf036c30820443b25ade59916236b1e95ee93
+  yoga: 92b2102c3d373d1a790db4ab761d2b0ffc634f64

-PODFILE CHECKSUM: 701dfb65d49e483fee82795022cc4212b9b1528e
+PODFILE CHECKSUM: ab732d8c64d6afc103bba2b4ead467d6a8684996

 COCOAPODS: 1.6.1
ios/Pods/Base64/Base64/MF_Base64Additions.h (generated, deleted, 34 lines)
@ -1,34 +0,0 @@
//
//  MF_Base64Additions.h
//  Base64 -- RFC 4648 compatible implementation
//  see http://www.ietf.org/rfc/rfc4648.txt for more details
//
//  Designed to be compiled with Automatic Reference Counting
//
//  Created by Dave Poirier on 2012-06-14.
//  Public Domain
//  Hosted at https://github.com/ekscrypto/Base64
//

#import <Foundation/Foundation.h>

@interface NSString (Base64Addition)
+(NSString *)stringFromBase64String:(NSString *)base64String;
+(NSString *)stringFromBase64UrlEncodedString:(NSString *)base64UrlEncodedString;
-(NSString *)base64String;
-(NSString *)base64UrlEncodedString;
@end

@interface NSData (Base64Addition)
+(NSData *)dataWithBase64String:(NSString *)base64String;
+(NSData *)dataWithBase64UrlEncodedString:(NSString *)base64UrlEncodedString;
-(NSString *)base64String;
-(NSString *)base64UrlEncodedString;
@end

@interface MF_Base64Codec : NSObject
+(NSData *)dataFromBase64String:(NSString *)base64String;
+(NSString *)base64StringFromData:(NSData *)data;
+(NSString *)base64UrlEncodedStringFromBase64String:(NSString *)base64String;
+(NSString *)base64StringFromBase64UrlEncodedString:(NSString *)base64UrlEncodedString;
@end
ios/Pods/Base64/Base64/MF_Base64Additions.m (generated, deleted, 252 lines)
@ -1,252 +0,0 @@
//
//  MF_Base64Additions.m
//  Base64 -- RFC 4648 compatible implementation
//  see http://www.ietf.org/rfc/rfc4648.txt for more details
//
//  Designed to be compiled with Automatic Reference Counting
//
//  Created by Dave Poirier on 2012-06-14.
//  Public Domain
//  Hosted at https://github.com/ekscrypto/Base64
//

#import "MF_Base64Additions.h"

@implementation MF_Base64Codec

+(NSString *)base64StringFromBase64UrlEncodedString:(NSString *)base64UrlEncodedString
{
    NSString *s = base64UrlEncodedString;
    s = [s stringByReplacingOccurrencesOfString:@"-" withString:@"+"];
    s = [s stringByReplacingOccurrencesOfString:@"_" withString:@"/"];
    switch (s.length % 4) {
        case 2:
            s = [s stringByAppendingString:@"=="];
            break;
        case 3:
            s = [s stringByAppendingString:@"="];
            break;
        default:
            break;
    }
    return s;
}

+(NSString *)base64UrlEncodedStringFromBase64String:(NSString *)base64String
{
    NSString *s = base64String;
    s = [s stringByReplacingOccurrencesOfString:@"=" withString:@""];
    s = [s stringByReplacingOccurrencesOfString:@"+" withString:@"-"];
    s = [s stringByReplacingOccurrencesOfString:@"/" withString:@"_"];
    return s;
}

+(NSData *)dataFromBase64String:(NSString *)encoding
{
    NSData *data = nil;
    unsigned char *decodedBytes = NULL;
    @try {
#define __ 255
        static char decodingTable[256] = {
            __,__,__,__, __,__,__,__, __,__,__,__, __,__,__,__,  // 0x00 - 0x0F
            __,__,__,__, __,__,__,__, __,__,__,__, __,__,__,__,  // 0x10 - 0x1F
            __,__,__,__, __,__,__,__, __,__,__,62, __,__,__,63,  // 0x20 - 0x2F
            52,53,54,55, 56,57,58,59, 60,61,__,__, __, 0,__,__,  // 0x30 - 0x3F
            __, 0, 1, 2,  3, 4, 5, 6,  7, 8, 9,10, 11,12,13,14,  // 0x40 - 0x4F
            15,16,17,18, 19,20,21,22, 23,24,25,__, __,__,__,__,  // 0x50 - 0x5F
            __,26,27,28, 29,30,31,32, 33,34,35,36, 37,38,39,40,  // 0x60 - 0x6F
            41,42,43,44, 45,46,47,48, 49,50,51,__, __,__,__,__,  // 0x70 - 0x7F
            __,__,__,__, __,__,__,__, __,__,__,__, __,__,__,__,  // 0x80 - 0x8F
            __,__,__,__, __,__,__,__, __,__,__,__, __,__,__,__,  // 0x90 - 0x9F
            __,__,__,__, __,__,__,__, __,__,__,__, __,__,__,__,  // 0xA0 - 0xAF
            __,__,__,__, __,__,__,__, __,__,__,__, __,__,__,__,  // 0xB0 - 0xBF
            __,__,__,__, __,__,__,__, __,__,__,__, __,__,__,__,  // 0xC0 - 0xCF
            __,__,__,__, __,__,__,__, __,__,__,__, __,__,__,__,  // 0xD0 - 0xDF
            __,__,__,__, __,__,__,__, __,__,__,__, __,__,__,__,  // 0xE0 - 0xEF
            __,__,__,__, __,__,__,__, __,__,__,__, __,__,__,__,  // 0xF0 - 0xFF
        };
        encoding = [encoding stringByReplacingOccurrencesOfString:@"=" withString:@""];
        NSData *encodedData = [encoding dataUsingEncoding:NSASCIIStringEncoding];
        unsigned char *encodedBytes = (unsigned char *)[encodedData bytes];

        NSUInteger encodedLength = [encodedData length];
        if( encodedLength >= (NSUIntegerMax - 3) ) return nil; // NSUInteger overflow check
        NSUInteger encodedBlocks = (encodedLength+3) >> 2;
        NSUInteger expectedDataLength = encodedBlocks * 3;

        unsigned char decodingBlock[4];

        decodedBytes = malloc(expectedDataLength);
        if( decodedBytes != NULL ) {

            NSUInteger i = 0;
            NSUInteger j = 0;
            NSUInteger k = 0;
            unsigned char c;
            while( i < encodedLength ) {
                c = decodingTable[encodedBytes[i]];
                i++;
                if( c != __ ) {
                    decodingBlock[j] = c;
                    j++;
                    if( j == 4 ) {
                        decodedBytes[k] = (decodingBlock[0] << 2) | (decodingBlock[1] >> 4);
                        decodedBytes[k+1] = (decodingBlock[1] << 4) | (decodingBlock[2] >> 2);
                        decodedBytes[k+2] = (decodingBlock[2] << 6) | (decodingBlock[3]);
                        j = 0;
                        k += 3;
                    }
                }
            }

            // Process left over bytes, if any
            if( j == 3 ) {
                decodedBytes[k] = (decodingBlock[0] << 2) | (decodingBlock[1] >> 4);
                decodedBytes[k+1] = (decodingBlock[1] << 4) | (decodingBlock[2] >> 2);
                k += 2;
            } else if( j == 2 ) {
                decodedBytes[k] = (decodingBlock[0] << 2) | (decodingBlock[1] >> 4);
                k += 1;
            }
            data = [[NSData alloc] initWithBytes:decodedBytes length:k];
        }
    }
    @catch (NSException *exception) {
        data = nil;
        NSLog(@"WARNING: error occured while decoding base 32 string: %@", exception);
    }
    @finally {
        if( decodedBytes != NULL ) {
            free( decodedBytes );
        }
    }
    return data;
}
+(NSString *)base64StringFromData:(NSData *)data
{
    NSString *encoding = nil;
    unsigned char *encodingBytes = NULL;
    @try {
        static char encodingTable[64] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
        static NSUInteger paddingTable[] = {0,2,1};
        //    Table 1: The Base 64 Alphabet
        //
        //    Value Encoding  Value Encoding  Value Encoding  Value Encoding
        //        0 A            17 R            34 i            51 z
        //        1 B            18 S            35 j            52 0
        //        2 C            19 T            36 k            53 1
        //        3 D            20 U            37 l            54 2
        //        4 E            21 V            38 m            55 3
        //        5 F            22 W            39 n            56 4
        //        6 G            23 X            40 o            57 5
        //        7 H            24 Y            41 p            58 6
        //        8 I            25 Z            42 q            59 7
        //        9 J            26 a            43 r            60 8
        //       10 K            27 b            44 s            61 9
        //       11 L            28 c            45 t            62 +
        //       12 M            29 d            46 u            63 /
        //       13 N            30 e            47 v
        //       14 O            31 f            48 w         (pad) =
        //       15 P            32 g            49 x
        //       16 Q            33 h            50 y

        NSUInteger dataLength = [data length];
        NSUInteger encodedBlocks = dataLength / 3;
        if( (encodedBlocks + 1) >= (NSUIntegerMax / 4) ) return nil; // NSUInteger overflow check
        NSUInteger padding = paddingTable[dataLength % 3];
        if( padding > 0 ) encodedBlocks++;
        NSUInteger encodedLength = encodedBlocks * 4;

        encodingBytes = malloc(encodedLength);
        if( encodingBytes != NULL ) {
            NSUInteger rawBytesToProcess = dataLength;
            NSUInteger rawBaseIndex = 0;
            NSUInteger encodingBaseIndex = 0;
            unsigned char *rawBytes = (unsigned char *)[data bytes];
            unsigned char rawByte1, rawByte2, rawByte3;
            while( rawBytesToProcess >= 3 ) {
                rawByte1 = rawBytes[rawBaseIndex];
                rawByte2 = rawBytes[rawBaseIndex+1];
                rawByte3 = rawBytes[rawBaseIndex+2];
                encodingBytes[encodingBaseIndex] = encodingTable[((rawByte1 >> 2) & 0x3F)];
                encodingBytes[encodingBaseIndex+1] = encodingTable[((rawByte1 << 4) & 0x30) | ((rawByte2 >> 4) & 0x0F) ];
                encodingBytes[encodingBaseIndex+2] = encodingTable[((rawByte2 << 2) & 0x3C) | ((rawByte3 >> 6) & 0x03) ];
                encodingBytes[encodingBaseIndex+3] = encodingTable[(rawByte3 & 0x3F)];

                rawBaseIndex += 3;
                encodingBaseIndex += 4;
                rawBytesToProcess -= 3;
            }
            rawByte2 = 0;
            switch (dataLength-rawBaseIndex) {
                case 2:
                    rawByte2 = rawBytes[rawBaseIndex+1];
                case 1:
                    rawByte1 = rawBytes[rawBaseIndex];
                    encodingBytes[encodingBaseIndex] = encodingTable[((rawByte1 >> 2) & 0x3F)];
                    encodingBytes[encodingBaseIndex+1] = encodingTable[((rawByte1 << 4) & 0x30) | ((rawByte2 >> 4) & 0x0F) ];
                    encodingBytes[encodingBaseIndex+2] = encodingTable[((rawByte2 << 2) & 0x3C) ];
                    // we can skip rawByte3 since we have a partial block it would always be 0
                    break;
            }
            // compute location from where to begin inserting padding, it may overwrite some bytes from the partial block encoding
            // if their value was 0 (cases 1-2).
            encodingBaseIndex = encodedLength - padding;
            while( padding-- > 0 ) {
                encodingBytes[encodingBaseIndex++] = '=';
            }
            encoding = [[NSString alloc] initWithBytes:encodingBytes length:encodedLength encoding:NSASCIIStringEncoding];
        }
    }
    @catch (NSException *exception) {
        encoding = nil;
        NSLog(@"WARNING: error occured while tring to encode base 32 data: %@", exception);
    }
    @finally {
        if( encodingBytes != NULL ) {
            free( encodingBytes );
        }
    }
    return encoding;
}
@end

@implementation NSString (Base64Addition)
-(NSString *)base64String
{
    NSData *utf8encoding = [self dataUsingEncoding:NSUTF8StringEncoding];
    return [MF_Base64Codec base64StringFromData:utf8encoding];
}
-(NSString *)base64UrlEncodedString
{
    return [MF_Base64Codec base64UrlEncodedStringFromBase64String:[self base64String]];
}
+(NSString *)stringFromBase64String:(NSString *)base64String
{
    NSData *utf8encoding = [MF_Base64Codec dataFromBase64String:base64String];
    return [[NSString alloc] initWithData:utf8encoding encoding:NSUTF8StringEncoding];
}
+(NSString *)stringFromBase64UrlEncodedString:(NSString *)base64UrlEncodedString
{
    return [self stringFromBase64String:[MF_Base64Codec base64StringFromBase64UrlEncodedString:base64UrlEncodedString]];
}
@end

@implementation NSData (Base64Addition)
+(NSData *)dataWithBase64String:(NSString *)base64String
{
    return [MF_Base64Codec dataFromBase64String:base64String];
}
+(NSData *)dataWithBase64UrlEncodedString:(NSString *)base64UrlEncodedString
{
    return [self dataWithBase64String:[MF_Base64Codec base64StringFromBase64UrlEncodedString:base64UrlEncodedString]];
}
-(NSString *)base64String
{
    return [MF_Base64Codec base64StringFromData:self];
}
-(NSString *)base64UrlEncodedString
{
    return [MF_Base64Codec base64UrlEncodedStringFromBase64String:[self base64String]];
}
@end
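For orientation, two ideas carry the deleted codec: base64url differs from plain base64 only in its alphabet and dropped `=` padding, which is recoverable from `length % 4`, and decoding packs each group of four 6-bit values into three bytes with the shifts seen above. A compact TypeScript sketch of both (illustrative, not the vendored implementation):

```typescript
// Re-pad a base64url string, mirroring base64StringFromBase64UrlEncodedString.
function base64FromBase64Url(s: string): string {
  s = s.replace(/-/g, "+").replace(/_/g, "/");
  switch (s.length % 4) {
    case 2: return s + "==";
    case 3: return s + "=";
    default: return s; // length % 4 === 0; 1 is never a valid base64 length
  }
}

// Pack one group of four 6-bit values into three bytes, mirroring the
// shift/or sequence in dataFromBase64String above.
function decodeBlock(b: [number, number, number, number]): [number, number, number] {
  return [
    ((b[0] << 2) | (b[1] >> 4)) & 0xff,
    ((b[1] << 4) | (b[2] >> 2)) & 0xff,
    ((b[2] << 6) | b[3]) & 0xff,
  ];
}

// "TWFu" -> sextets [19, 22, 5, 46] -> bytes for "Man"
console.log(decodeBlock([19, 22, 5, 46])); // [77, 97, 110]
```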
ios/Pods/Base64/README.md (generated, deleted, 47 lines)
@ -1,47 +0,0 @@
[![CI Status](https://travis-ci.org/ekscrypto/Base64.svg?branch=master)](https://github.com/ekscrypto/Base64)

Base64 Additions for Objective-C on Mac OS X and iOS
=======

Usage
----
Open the Xcode project file, and drag MF_Base64Additions.m/.h into your project.

In files where you want to use Base64 encoding/decoding, simply include the header file and use one of the provided NSData or NSString additions.

Example use:

    #import "MF_Base64Additions.h"

    NSString *helloWorld = @"Hello World";
    NSString *helloInBase64 = [helloWorld base64String];
    NSString *helloDecoded = [NSString stringFromBase64String:helloInBase64];

Performance
----
* Encoding: Approximately 4 to 5 times faster than using the equivalent SecTransform.
* Encoding: 30% faster than https://github.com/l4u/NSData-Base64
* Decoding: 5% faster than using the equivalent SecTransform.
* Decoding: 5% faster than https://github.com/l4u/NSData-Base64

Requirements
-----
* Compile with Automatic Reference Counting
* Compatible with Mac OSX 10.6+ and iOS 4.0+

Implementation
----
* Implemented as per RFC 4648, see http://www.ietf.org/rfc/rfc4648.txt for more details.

Licensing
----
* Public Domain
ios/Pods/Folly/README.md (generated)
@ -1,10 +1,12 @@
 Folly: Facebook Open-source Library
 -----------------------------------

+[![Build Status](https://travis-ci.org/facebook/folly.svg?branch=master)](https://travis-ci.org/facebook/folly)
+
 ### What is `folly`?

 Folly (acronymed loosely after Facebook Open Source Library) is a
-library of C++11 components designed with practicality and efficiency
+library of C++14 components designed with practicality and efficiency
 in mind. **Folly contains a variety of core library components used extensively
 at Facebook**. In particular, it's often a dependency of Facebook's other
 open source C++ efforts and place where those projects can share code.
@ -70,19 +72,39 @@ Folly is published on Github at https://github.com/facebook/folly

 #### Dependencies

-folly requires gcc 4.8+ and a version of boost compiled with C++11 support.
+folly requires gcc 4.9+ and a version of boost compiled with C++14 support.

-Please download googletest from
-https://googletest.googlecode.com/files/gtest-1.7.0.zip and unzip it in the
-folly/test subdirectory.
+googletest is required to build and run folly's tests. You can download
+it from https://github.com/google/googletest/archive/release-1.8.0.tar.gz
+The following commands can be used to download and install it:

-#### Ubuntu 12.04
-
-This release is old, requiring many upgrades. However, since Travis CI runs
-on 12.04, `folly/build/deps_ubuntu_12.04.sh` is provided, and upgrades all
-the required packages.
+```
+wget https://github.com/google/googletest/archive/release-1.8.0.tar.gz && \
+tar zxf release-1.8.0.tar.gz && \
+rm -f release-1.8.0.tar.gz && \
+cd googletest-release-1.8.0 && \
+cmake . && \
+make && \
+make install
+```

-#### Ubuntu 13.10
+#### Finding dependencies in non-default locations
+
+If you have boost, gtest, or other dependencies installed in a non-default
+location, you can use the `CMAKE_INCLUDE_PATH` and `CMAKE_LIBRARY_PATH`
+variables to make CMAKE look also look for header files and libraries in
+non-standard locations. For example, to also search the directories
+`/alt/include/path1` and `/alt/include/path2` for header files and the
+directories `/alt/lib/path1` and `/alt/lib/path2` for libraries, you can invoke
+`cmake` as follows:
+
+```
+cmake \
+  -DCMAKE_INCLUDE_PATH=/alt/include/path1:/alt/include/path2 \
+  -DCMAKE_LIBRARY_PATH=/alt/lib/path1:/alt/lib/path2 ...
+```

 #### Ubuntu 16.04 LTS

 The following packages are required (feel free to cut and paste the apt-get
 command below):
@ -90,15 +112,13 @@ command below):
 ```
 sudo apt-get install \
     g++ \
     automake \
     autoconf \
     autoconf-archive \
     libtool \
     cmake \
     libboost-all-dev \
     libevent-dev \
     libdouble-conversion-dev \
     libgoogle-glog-dev \
     libgflags-dev \
     libiberty-dev \
     liblz4-dev \
     liblzma-dev \
     libsnappy-dev \
@ -106,10 +126,11 @@ sudo apt-get install \
     zlib1g-dev \
     binutils-dev \
     libjemalloc-dev \
-    libssl-dev
+    libssl-dev \
+    pkg-config
 ```

-If advanced debugging functionality is required
+If advanced debugging functionality is required, use:

 ```
 sudo apt-get install \
@ -118,24 +139,12 @@ sudo apt-get install \
     libdwarf-dev
 ```

-#### Ubuntu 14.04 LTS
-
-The packages listed above for Ubuntu 13.10 are required, as well as:
-
-```
-sudo apt-get install \
-    libiberty-dev
-```
-
-The above packages are sufficient for Ubuntu 13.10 and Ubuntu 14.04.
-
-In the folly directory, run
+In the folly directory, run:
 ```
-autoreconf -ivf
-./configure
-make
-make check
-sudo make install
+mkdir _build && cd _build
+cmake ..
+make -j $(nproc)
+make install
 ```

 #### OS X (Homebrew)
@ -147,8 +156,6 @@ You may also use `folly/build/bootstrap-osx-homebrew.sh` to build against `master`
 ```
 cd folly
 ./build/bootstrap-osx-homebrew.sh
-make
-make check
 ```

 #### OS X (MacPorts)
@ -193,26 +200,32 @@ Download and install folly with the parameters listed below:
   sudo make install
 ```

+#### Windows (Vcpkg)
+
+folly is available in [Vcpkg](https://github.com/Microsoft/vcpkg#vcpkg) and releases may be built via `vcpkg install folly:x64-windows`.
+
+You may also use `vcpkg install folly:x64-windows --head` to build against `master`.
+
 #### Other Linux distributions

 - double-conversion (https://github.com/google/double-conversion)

   Download and build double-conversion.
-  You may need to tell configure where to find it.
+  You may need to tell cmake where to find it.

   [double-conversion/] `ln -s src double-conversion`

-  [folly/] `./configure LDFLAGS=-L$DOUBLE_CONVERSION_HOME/ CPPFLAGS=-I$DOUBLE_CONVERSION_HOME/`
+  [folly/] `mkdir build && cd build`
+  [folly/build/] `cmake "-DCMAKE_INCLUDE_PATH=$DOUBLE_CONVERSION_HOME/include" "-DCMAKE_LIBRARY_PATH=$DOUBLE_CONVERSION_HOME/lib" ..`

-  [folly/] `LD_LIBRARY_PATH=$DOUBLE_CONVERSION_HOME/ make`
+  [folly/build/] `make`

 - additional platform specific dependencies:

-  Fedora 21 64-bit
+  Fedora >= 21 64-bit (last tested on Fedora 28 64-bit)
   - gcc
   - gcc-c++
   - autoconf
   - autoconf-archive
   - cmake
   - automake
   - boost-devel
   - libtool
ios/Pods/Folly/folly/ApplyTuple.h (generated, deleted, 124 lines)
@ -1,124 +0,0 @@
/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Defines a function folly::applyTuple, which takes a function and a
 * std::tuple of arguments and calls the function with those
 * arguments.
 *
 * Example:
 *
 *    int x = folly::applyTuple(std::plus<int>(), std::make_tuple(12, 12));
 *    ASSERT(x == 24);
 */

#pragma once

#include <functional>
#include <tuple>
#include <utility>

namespace folly {

//////////////////////////////////////////////////////////////////////

namespace detail {
namespace apply_tuple {

template <std::size_t...>
struct IndexSequence {};

template <std::size_t N, std::size_t... Is>
struct MakeIndexSequence : MakeIndexSequence<N - 1, N - 1, Is...> {};

template <std::size_t... Is>
struct MakeIndexSequence<0, Is...> : IndexSequence<Is...> {};

inline constexpr std::size_t sum() {
  return 0;
}
template <typename... Args>
inline constexpr std::size_t sum(std::size_t v1, Args... vs) {
  return v1 + sum(vs...);
}

template <typename... Tuples>
struct TupleSizeSum {
  static constexpr auto value = sum(std::tuple_size<Tuples>::value...);
};

template <typename... Tuples>
using MakeIndexSequenceFromTuple = MakeIndexSequence<
    TupleSizeSum<typename std::decay<Tuples>::type...>::value>;

// This is to allow using this with pointers to member functions,
// where the first argument in the tuple will be the this pointer.
template <class F>
inline constexpr F&& makeCallable(F&& f) {
  return std::forward<F>(f);
}
template <class M, class C>
inline constexpr auto makeCallable(M(C::*d)) -> decltype(std::mem_fn(d)) {
  return std::mem_fn(d);
}

template <class F, class Tuple, std::size_t... Indexes>
inline constexpr auto call(F&& f, Tuple&& t, IndexSequence<Indexes...>)
    -> decltype(
        std::forward<F>(f)(std::get<Indexes>(std::forward<Tuple>(t))...)) {
  return std::forward<F>(f)(std::get<Indexes>(std::forward<Tuple>(t))...);
}

template <class Tuple, std::size_t... Indexes>
inline constexpr auto forwardTuple(Tuple&& t, IndexSequence<Indexes...>)
    -> decltype(
        std::forward_as_tuple(std::get<Indexes>(std::forward<Tuple>(t))...)) {
  return std::forward_as_tuple(std::get<Indexes>(std::forward<Tuple>(t))...);
}

} // namespace apply_tuple
} // namespace detail

//////////////////////////////////////////////////////////////////////

/**
 * Invoke a callable object with a set of arguments passed as a tuple, or a
 * series of tuples
 *
 * Example: the following lines are equivalent
 *    func(1, 2, 3, "foo");
 *    applyTuple(func, std::make_tuple(1, 2, 3, "foo"));
 *    applyTuple(func, std::make_tuple(1, 2), std::make_tuple(3, "foo"));
 */

template <class F, class... Tuples>
inline constexpr auto applyTuple(F&& f, Tuples&&... t)
    -> decltype(detail::apply_tuple::call(
        detail::apply_tuple::makeCallable(std::forward<F>(f)),
        std::tuple_cat(detail::apply_tuple::forwardTuple(
            std::forward<Tuples>(t),
            detail::apply_tuple::MakeIndexSequenceFromTuple<Tuples>{})...),
        detail::apply_tuple::MakeIndexSequenceFromTuple<Tuples...>{})) {
  return detail::apply_tuple::call(
      detail::apply_tuple::makeCallable(std::forward<F>(f)),
      std::tuple_cat(detail::apply_tuple::forwardTuple(
          std::forward<Tuples>(t),
          detail::apply_tuple::MakeIndexSequenceFromTuple<Tuples>{})...),
      detail::apply_tuple::MakeIndexSequenceFromTuple<Tuples...>{});
}

//////////////////////////////////////////////////////////////////////
}
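The deleted header's `applyTuple` unpacks a tuple into a function call via an index-sequence expansion. TypeScript expresses the same idea natively with spread; a tiny sketch of the equivalence, offered purely as an analogy:

```typescript
// C++: folly::applyTuple(std::plus<int>(), std::make_tuple(12, 12)) == 24
function applyTuple<A extends unknown[], R>(f: (...args: A) => R, t: A): R {
  return f(...t); // spread plays the role of the IndexSequence expansion
}

console.log(applyTuple((a: number, b: number) => a + b, [12, 12])); // 24
```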
ios/Pods/Folly/folly/AtomicBitSet.h (generated)
@ -1,5 +1,5 @@
 /*
- * Copyright 2016 Facebook, Inc.
+ * Copyright 2013-present Facebook, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@ -67,15 +67,16 @@ class AtomicBitSet : private boost::noncopyable {
    * Yes, this is an overload of set(), to keep as close to std::bitset's
    * interface as possible.
    */
-  bool set(size_t idx,
-           bool value,
-           std::memory_order order = std::memory_order_seq_cst);
+  bool set(
+      size_t idx,
+      bool value,
+      std::memory_order order = std::memory_order_seq_cst);

   /**
    * Read bit idx.
    */
-  bool test(size_t idx,
-            std::memory_order order = std::memory_order_seq_cst) const;
+  bool test(size_t idx, std::memory_order order = std::memory_order_seq_cst)
+      const;

   /**
    * Same as test() with the default memory order.
@ -102,7 +103,7 @@ class AtomicBitSet : private boost::noncopyable {
   typedef std::atomic<BlockType> AtomicBlockType;

   static constexpr size_t kBitsPerBlock =
-    std::numeric_limits<BlockType>::digits;
+      std::numeric_limits<BlockType>::digits;

   static constexpr size_t blockIndex(size_t bit) {
     return bit / kBitsPerBlock;
@ -120,8 +121,7 @@ class AtomicBitSet : private boost::noncopyable {

 // value-initialize to zero
 template <size_t N>
-inline AtomicBitSet<N>::AtomicBitSet() : data_() {
-}
+inline AtomicBitSet<N>::AtomicBitSet() : data_() {}

 template <size_t N>
 inline bool AtomicBitSet<N>::set(size_t idx, std::memory_order order) {
@ -138,9 +138,8 @@ inline bool AtomicBitSet<N>::reset(size_t idx, std::memory_order order) {
 }

 template <size_t N>
-inline bool AtomicBitSet<N>::set(size_t idx,
-                                 bool value,
-                                 std::memory_order order) {
+inline bool
+AtomicBitSet<N>::set(size_t idx, bool value, std::memory_order order) {
   return value ? set(idx, order) : reset(idx, order);
 }

@ -156,4 +155,4 @@ inline bool AtomicBitSet<N>::operator[](size_t idx) const {
   return test(idx);
 }

-} // namespaces
+} // namespace folly
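`AtomicBitSet` addresses a bit by a block index (`bit / kBitsPerBlock`) plus an offset within that block, as the declarations above show. A non-atomic TypeScript sketch of that addressing, assuming 32-bit blocks (folly derives the width from `std::numeric_limits<BlockType>::digits`):

```typescript
const BITS_PER_BLOCK = 32; // assumed block width

class BitSet {
  private data: Uint32Array;
  constructor(nBits: number) {
    this.data = new Uint32Array(Math.ceil(nBits / BITS_PER_BLOCK));
  }
  // Like folly's set(idx, value): writes the bit and returns its previous value.
  set(idx: number, value = true): boolean {
    const block = Math.floor(idx / BITS_PER_BLOCK); // blockIndex(bit)
    const mask = 1 << (idx % BITS_PER_BLOCK);       // offset within the block
    const prev = (this.data[block] & mask) !== 0;
    if (value) this.data[block] |= mask; else this.data[block] &= ~mask;
    return prev;
  }
  test(idx: number): boolean {
    const block = Math.floor(idx / BITS_PER_BLOCK);
    return (this.data[block] & (1 << (idx % BITS_PER_BLOCK))) !== 0;
  }
}
```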
ios/Pods/Folly/folly/AtomicHashArray-inl.h (generated)
@ -1,5 +1,5 @@
 /*
- * Copyright 2016 Facebook, Inc.
+ * Copyright 2012-present Facebook, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@ -20,24 +20,45 @@

 #include <type_traits>

-#include <folly/Bits.h>
 #include <folly/detail/AtomicHashUtils.h>
+#include <folly/lang/Bits.h>

 namespace folly {

 // AtomicHashArray private constructor --
-template <class KeyT, class ValueT, class HashFcn, class EqualFcn,
-          class Allocator, class ProbeFcn, class KeyConvertFcn>
-AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
-                Allocator, ProbeFcn, KeyConvertFcn>::
-AtomicHashArray(size_t capacity, KeyT emptyKey, KeyT lockedKey,
-                KeyT erasedKey, double _maxLoadFactor, size_t cacheSize)
+template <
+    class KeyT,
+    class ValueT,
+    class HashFcn,
+    class EqualFcn,
+    class Allocator,
+    class ProbeFcn,
+    class KeyConvertFcn>
+AtomicHashArray<
+    KeyT,
+    ValueT,
+    HashFcn,
+    EqualFcn,
+    Allocator,
+    ProbeFcn,
+    KeyConvertFcn>::
+    AtomicHashArray(
+        size_t capacity,
+        KeyT emptyKey,
+        KeyT lockedKey,
+        KeyT erasedKey,
+        double _maxLoadFactor,
+        uint32_t cacheSize)
     : capacity_(capacity),
       maxEntries_(size_t(_maxLoadFactor * capacity_ + 0.5)),
-      kEmptyKey_(emptyKey), kLockedKey_(lockedKey), kErasedKey_(erasedKey),
-      kAnchorMask_(nextPowTwo(capacity_) - 1), numEntries_(0, cacheSize),
-      numPendingEntries_(0, cacheSize), isFull_(0), numErases_(0) {
-}
+      kEmptyKey_(emptyKey),
+      kLockedKey_(lockedKey),
+      kErasedKey_(erasedKey),
+      kAnchorMask_(nextPowTwo(capacity_) - 1),
+      numEntries_(0, cacheSize),
+      numPendingEntries_(0, cacheSize),
+      isFull_(0),
+      numErases_(0) {}

 /*
  * findInternal --
@ -46,18 +67,35 @@ AtomicHashArray(size_t capacity, KeyT emptyKey, KeyT lockedKey,
  * of key and returns true, or if key does not exist returns false and
  * ret.index is set to capacity_.
  */
-template <class KeyT, class ValueT, class HashFcn, class EqualFcn,
-          class Allocator, class ProbeFcn, class KeyConvertFcn>
+template <
+    class KeyT,
+    class ValueT,
+    class HashFcn,
+    class EqualFcn,
+    class Allocator,
+    class ProbeFcn,
+    class KeyConvertFcn>
 template <class LookupKeyT, class LookupHashFcn, class LookupEqualFcn>
-typename AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
-                         Allocator, ProbeFcn, KeyConvertFcn>::SimpleRetT
-AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
-                Allocator, ProbeFcn, KeyConvertFcn>::
-findInternal(const LookupKeyT key_in) {
+typename AtomicHashArray<
+    KeyT,
+    ValueT,
+    HashFcn,
+    EqualFcn,
+    Allocator,
+    ProbeFcn,
+    KeyConvertFcn>::SimpleRetT
+AtomicHashArray<
+    KeyT,
+    ValueT,
+    HashFcn,
+    EqualFcn,
+    Allocator,
+    ProbeFcn,
+    KeyConvertFcn>::findInternal(const LookupKeyT key_in) {
   checkLegalKeyIfKey<LookupKeyT>(key_in);

   for (size_t idx = keyToAnchorIdx<LookupKeyT, LookupHashFcn>(key_in),
-              numProbes = 0;
+       numProbes = 0;
        ;
        idx = ProbeFcn()(idx, numProbes, capacity_)) {
     const KeyT key = acquireLoadKey(cells_[idx]);
@ -88,18 +126,36 @@ findInternal(const LookupKeyT key_in) {
  * this will be the previously inserted value, and if the map is full it is
  * default.
  */
-template <class KeyT, class ValueT, class HashFcn, class EqualFcn,
-          class Allocator, class ProbeFcn, class KeyConvertFcn>
-template <typename LookupKeyT,
-          typename LookupHashFcn,
-          typename LookupEqualFcn,
-          typename LookupKeyToKeyFcn,
-          typename... ArgTs>
-typename AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
-                         Allocator, ProbeFcn, KeyConvertFcn>::SimpleRetT
-AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
-                Allocator, ProbeFcn, KeyConvertFcn>::
-insertInternal(LookupKeyT key_in, ArgTs&&... vCtorArgs) {
+template <
+    class KeyT,
+    class ValueT,
+    class HashFcn,
+    class EqualFcn,
+    class Allocator,
+    class ProbeFcn,
+    class KeyConvertFcn>
+template <
+    typename LookupKeyT,
+    typename LookupHashFcn,
+    typename LookupEqualFcn,
+    typename LookupKeyToKeyFcn,
+    typename... ArgTs>
+typename AtomicHashArray<
+    KeyT,
+    ValueT,
+    HashFcn,
+    EqualFcn,
+    Allocator,
+    ProbeFcn,
+    KeyConvertFcn>::SimpleRetT
+AtomicHashArray<
+    KeyT,
+    ValueT,
+    HashFcn,
+    EqualFcn,
+    Allocator,
+    ProbeFcn,
+    KeyConvertFcn>::insertInternal(LookupKeyT key_in, ArgTs&&... vCtorArgs) {
   const short NO_NEW_INSERTS = 1;
   const short NO_PENDING_INSERTS = 2;
   checkLegalKeyIfKey<LookupKeyT>(key_in);
@ -125,9 +181,9 @@ insertInternal(LookupKeyT key_in, ArgTs&&... vCtorArgs) {
       // to pass the isFull_.load() test above. (It shouldn't insert
       // a new entry.)
       detail::atomic_hash_spin_wait([&] {
-        return
-          (isFull_.load(std::memory_order_acquire) != NO_PENDING_INSERTS) &&
-          (numPendingEntries_.readFull() != 0);
+        return (isFull_.load(std::memory_order_acquire) !=
+                NO_PENDING_INSERTS) &&
+            (numPendingEntries_.readFull() != 0);
       });
       isFull_.store(NO_PENDING_INSERTS, std::memory_order_release);
@ -144,10 +200,10 @@ insertInternal(LookupKeyT key_in, ArgTs&&... vCtorArgs) {
         // Write the value - done before unlocking
         try {
           key_new = LookupKeyToKeyFcn()(key_in);
-          typedef typename std::remove_const<LookupKeyT>::type
-            LookupKeyTNoConst;
+          typedef
+              typename std::remove_const<LookupKeyT>::type LookupKeyTNoConst;
           constexpr bool kAlreadyChecked =
-            std::is_same<KeyT, LookupKeyTNoConst>::value;
+              std::is_same<KeyT, LookupKeyTNoConst>::value;
           if (!kAlreadyChecked) {
             checkLegalKeyIfKey(key_new);
           }
@ -168,10 +224,11 @@ insertInternal(LookupKeyT key_in, ArgTs&&... vCtorArgs) {
         // An erase() can race here and delete right after our insertion
         // Direct comparison rather than EqualFcn ok here
         // (we just inserted it)
-        DCHECK(relaxedLoadKey(*cell) == key_new ||
-               relaxedLoadKey(*cell) == kErasedKey_);
+        DCHECK(
+            relaxedLoadKey(*cell) == key_new ||
+            relaxedLoadKey(*cell) == kErasedKey_);
         --numPendingEntries_;
-        ++numEntries_;  // This is a thread cached atomic increment :)
+        ++numEntries_; // This is a thread cached atomic increment :)
         if (numEntries_.readFast() >= maxEntries_) {
           isFull_.store(NO_NEW_INSERTS, std::memory_order_relaxed);
         }
@ -182,9 +239,8 @@ insertInternal(LookupKeyT key_in, ArgTs&&... vCtorArgs) {
     }
     DCHECK(relaxedLoadKey(*cell) != kEmptyKey_);
     if (kLockedKey_ == acquireLoadKey(*cell)) {
-      detail::atomic_hash_spin_wait([&] {
-        return kLockedKey_ == acquireLoadKey(*cell);
-      });
+      detail::atomic_hash_spin_wait(
+          [&] { return kLockedKey_ == acquireLoadKey(*cell); });
     }

     const KeyT thisKey = acquireLoadKey(*cell);
@ -199,7 +255,6 @@ insertInternal(LookupKeyT key_in, ArgTs&&... vCtorArgs) {
       continue;
     }

-
     // NOTE: the way we count numProbes must be same in find(),
     // insert(), and erase(). Otherwise it may break probing.
     ++numProbes;
@ -222,17 +277,27 @@ insertInternal(LookupKeyT key_in, ArgTs&&... vCtorArgs) {
  * erased key will never be reused. If there's an associated value, we won't
  * touch it either.
  */
-template <class KeyT, class ValueT, class HashFcn, class EqualFcn,
-          class Allocator, class ProbeFcn, class KeyConvertFcn>
-size_t AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
-                       Allocator, ProbeFcn, KeyConvertFcn>::
-erase(KeyT key_in) {
+template <
+    class KeyT,
+    class ValueT,
+    class HashFcn,
+    class EqualFcn,
+    class Allocator,
+    class ProbeFcn,
+    class KeyConvertFcn>
+size_t AtomicHashArray<
+    KeyT,
+    ValueT,
+    HashFcn,
+    EqualFcn,
+    Allocator,
+    ProbeFcn,
+    KeyConvertFcn>::erase(KeyT key_in) {
   CHECK_NE(key_in, kEmptyKey_);
   CHECK_NE(key_in, kLockedKey_);
   CHECK_NE(key_in, kErasedKey_);

-  for (size_t idx = keyToAnchorIdx(key_in), numProbes = 0;
-       ;
+  for (size_t idx = keyToAnchorIdx(key_in), numProbes = 0;;
        idx = ProbeFcn()(idx, numProbes, capacity_)) {
     DCHECK_LT(idx, capacity_);
     value_type* cell = &cells_[idx];
@ -271,13 +336,30 @@ erase(KeyT key_in) {
   }
 }

-template <class KeyT, class ValueT, class HashFcn, class EqualFcn,
-          class Allocator, class ProbeFcn, class KeyConvertFcn>
-typename AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
-                         Allocator, ProbeFcn, KeyConvertFcn>::SmartPtr
-AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
-                Allocator, ProbeFcn, KeyConvertFcn>::
-create(size_t maxSize, const Config& c) {
+template <
+    class KeyT,
+    class ValueT,
+    class HashFcn,
+    class EqualFcn,
+    class Allocator,
+    class ProbeFcn,
+    class KeyConvertFcn>
+typename AtomicHashArray<
+    KeyT,
+    ValueT,
+    HashFcn,
+    EqualFcn,
+    Allocator,
+    ProbeFcn,
+    KeyConvertFcn>::SmartPtr
+AtomicHashArray<
+    KeyT,
+    ValueT,
+    HashFcn,
+    EqualFcn,
+    Allocator,
+    ProbeFcn,
+    KeyConvertFcn>::create(size_t maxSize, const Config& c) {
   CHECK_LE(c.maxLoadFactor, 1.0);
   CHECK_GT(c.maxLoadFactor, 0.0);
   CHECK_NE(c.emptyKey, c.lockedKey);
@ -286,14 +368,19 @@ create(size_t maxSize, const Config& c) {

   auto const mem = Allocator().allocate(sz);
   try {
-    new (mem) AtomicHashArray(capacity, c.emptyKey, c.lockedKey, c.erasedKey,
-                              c.maxLoadFactor, c.entryCountThreadCacheSize);
+    new (mem) AtomicHashArray(
+        capacity,
+        c.emptyKey,
+        c.lockedKey,
+        c.erasedKey,
+        c.maxLoadFactor,
+        c.entryCountThreadCacheSize);
   } catch (...) {
     Allocator().deallocate(mem, sz);
     throw;
   }

-  SmartPtr map(static_cast<AtomicHashArray*>((void *)mem));
+  SmartPtr map(static_cast<AtomicHashArray*>((void*)mem));

   /*
    * Mark all cells as empty.
@ -307,39 +394,61 @@ create(size_t maxSize, const Config& c) {
    * have an expensive default constructor for the value type this can
    * noticeably speed construction time for an AHA.
    */
-  FOR_EACH_RANGE(i, 0, map->capacity_) {
-    cellKeyPtr(map->cells_[i])->store(map->kEmptyKey_,
-                                      std::memory_order_relaxed);
+  FOR_EACH_RANGE (i, 0, map->capacity_) {
+    cellKeyPtr(map->cells_[i])
+        ->store(map->kEmptyKey_, std::memory_order_relaxed);
   }
   return map;
 }

-template <class KeyT, class ValueT, class HashFcn, class EqualFcn,
-          class Allocator, class ProbeFcn, class KeyConvertFcn>
-void AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
-                     Allocator, ProbeFcn, KeyConvertFcn>::
-destroy(AtomicHashArray* p) {
+template <
+    class KeyT,
+    class ValueT,
+    class HashFcn,
+    class EqualFcn,
+    class Allocator,
+    class ProbeFcn,
+    class KeyConvertFcn>
+void AtomicHashArray<
+    KeyT,
+    ValueT,
+    HashFcn,
+    EqualFcn,
+    Allocator,
+    ProbeFcn,
+    KeyConvertFcn>::destroy(AtomicHashArray* p) {
   assert(p);

   size_t sz = sizeof(AtomicHashArray) + sizeof(value_type) * p->capacity_;

-  FOR_EACH_RANGE(i, 0, p->capacity_) {
+  FOR_EACH_RANGE (i, 0, p->capacity_) {
     if (p->cells_[i].first != p->kEmptyKey_) {
       p->cells_[i].~value_type();
     }
   }
   p->~AtomicHashArray();

-  Allocator().deallocate((char *)p, sz);
+  Allocator().deallocate((char*)p, sz);
 }

 // clear -- clears all keys and values in the map and resets all counters
-template <class KeyT, class ValueT, class HashFcn, class EqualFcn,
-          class Allocator, class ProbeFcn, class KeyConvertFcn>
-void AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
-                     Allocator, ProbeFcn, KeyConvertFcn>::
-clear() {
-  FOR_EACH_RANGE(i, 0, capacity_) {
+template <
+    class KeyT,
+    class ValueT,
+    class HashFcn,
+    class EqualFcn,
+    class Allocator,
+    class ProbeFcn,
+    class KeyConvertFcn>
+void AtomicHashArray<
+    KeyT,
+    ValueT,
+    HashFcn,
+    EqualFcn,
+    Allocator,
+    ProbeFcn,
+    KeyConvertFcn>::clear() {
+  FOR_EACH_RANGE (i, 0, capacity_) {
     if (cells_[i].first != kEmptyKey_) {
       cells_[i].~value_type();
       *const_cast<KeyT*>(&cells_[i].first) = kEmptyKey_;
@ -352,41 +461,50 @@ clear() {
   numErases_.store(0, std::memory_order_relaxed);
 }

-
 // Iterator implementation

-template <class KeyT, class ValueT, class HashFcn, class EqualFcn,
-          class Allocator, class ProbeFcn, class KeyConvertFcn>
+template <
+    class KeyT,
+    class ValueT,
+    class HashFcn,
+    class EqualFcn,
+    class Allocator,
+    class ProbeFcn,
+    class KeyConvertFcn>
 template <class ContT, class IterVal>
-struct AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
-                       Allocator, ProbeFcn, KeyConvertFcn>::
-    aha_iterator
-    : boost::iterator_facade<aha_iterator<ContT,IterVal>,
-                             IterVal,
-                             boost::forward_traversal_tag>
-{
-  explicit aha_iterator() : aha_(0) {}
+struct AtomicHashArray<
+    KeyT,
+    ValueT,
+    HashFcn,
+    EqualFcn,
+    Allocator,
+    ProbeFcn,
+    KeyConvertFcn>::aha_iterator
+    : boost::iterator_facade<
+          aha_iterator<ContT, IterVal>,
+          IterVal,
+          boost::forward_traversal_tag> {
+  explicit aha_iterator() : aha_(nullptr) {}

   // Conversion ctor for interoperability between const_iterator and
   // iterator.  The enable_if<> magic keeps us well-behaved for
   // is_convertible<> (v. the iterator_facade documentation).
-  template<class OtherContT, class OtherVal>
-  aha_iterator(const aha_iterator<OtherContT,OtherVal>& o,
-               typename std::enable_if<
-                 std::is_convertible<OtherVal*,IterVal*>::value >::type* = 0)
-      : aha_(o.aha_)
-      , offset_(o.offset_)
-  {}
+  template <class OtherContT, class OtherVal>
+  aha_iterator(
+      const aha_iterator<OtherContT, OtherVal>& o,
+      typename std::enable_if<
+          std::is_convertible<OtherVal*, IterVal*>::value>::type* = nullptr)
+      : aha_(o.aha_), offset_(o.offset_) {}

   explicit aha_iterator(ContT* array, size_t offset)
-      : aha_(array)
-      , offset_(offset)
-  {}
+      : aha_(array), offset_(offset) {}

   // Returns unique index that can be used with findAt().
   // WARNING: The following function will fail silently for hashtable
   // with capacity > 2^32
-  uint32_t getIndex() const { return offset_; }
+  uint32_t getIndex() const {
+    return offset_;
+  }

   void advancePastEmpty() {
     while (offset_ < aha_->capacity_ && !isValid()) {
@ -413,9 +531,8 @@ struct AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,

   bool isValid() const {
     KeyT key = acquireLoadKey(aha_->cells_[offset_]);
-    return key != aha_->kEmptyKey_ &&
-      key != aha_->kLockedKey_ &&
-      key != aha_->kErasedKey_;
+    return key != aha_->kEmptyKey_ && key != aha_->kLockedKey_ &&
+        key != aha_->kErasedKey_;
   }

  private:
293 ios/Pods/Folly/folly/AtomicHashArray.h generated
@ -1,5 +1,5 @@
/*
* Copyright 2016 Facebook, Inc.
* Copyright 2012-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -37,16 +37,15 @@
#include <boost/iterator/iterator_facade.hpp>
#include <boost/noncopyable.hpp>

#include <folly/Hash.h>
#include <folly/ThreadCachedInt.h>
#include <folly/Utility.h>
#include <folly/hash/Hash.h>

namespace folly {

struct AtomicHashArrayLinearProbeFcn
{
inline size_t operator()(size_t idx,
size_t /* numProbes */,
size_t capacity) const {
struct AtomicHashArrayLinearProbeFcn {
inline size_t operator()(size_t idx, size_t /* numProbes */, size_t capacity)
const {
idx += 1; // linear probing

// Avoid modulus because it's slow
@ -54,9 +53,9 @@ struct AtomicHashArrayLinearProbeFcn
}
};

struct AtomicHashArrayQuadraticProbeFcn
{
inline size_t operator()(size_t idx, size_t numProbes, size_t capacity) const{
struct AtomicHashArrayQuadraticProbeFcn {
inline size_t operator()(size_t idx, size_t numProbes, size_t capacity)
const {
idx += numProbes; // quadratic probing

// Avoid modulus because it's slow
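// Illustration: the modulus-free wrap that both probe functors above rely
// on. A minimal sketch (helper name hypothetical), assuming the advanced
// index overshoots capacity by less than one full table length, which holds
// for linear probing and for quadratic probing while numProbes < capacity:
inline size_t wrapProbeIndex(size_t idx, size_t capacity) {
  // One compare-and-subtract instead of the slow integer modulus.
  return idx < capacity ? idx : idx - capacity;
}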
@ -66,77 +65,77 @@ struct AtomicHashArrayQuadraticProbeFcn

// Enables specializing checkLegalKey without specializing its class.
namespace detail {
// Local copy of folly::gen::Identity, to avoid heavy dependencies.
class AHAIdentity {
public:
template<class Value>
auto operator()(Value&& value) const ->
decltype(std::forward<Value>(value)) {
return std::forward<Value>(value);
}
};

template <typename NotKeyT, typename KeyT>
inline void checkLegalKeyIfKeyTImpl(NotKeyT /* ignored */,
KeyT /* emptyKey */,
KeyT /* lockedKey */,
KeyT /* erasedKey */) {}
inline void checkLegalKeyIfKeyTImpl(
NotKeyT /* ignored */,
KeyT /* emptyKey */,
KeyT /* lockedKey */,
KeyT /* erasedKey */) {}

template <typename KeyT>
inline void checkLegalKeyIfKeyTImpl(KeyT key_in, KeyT emptyKey,
KeyT lockedKey, KeyT erasedKey) {
inline void checkLegalKeyIfKeyTImpl(
KeyT key_in,
KeyT emptyKey,
KeyT lockedKey,
KeyT erasedKey) {
DCHECK_NE(key_in, emptyKey);
DCHECK_NE(key_in, lockedKey);
DCHECK_NE(key_in, erasedKey);
}
} // namespace detail
} // namespace detail

template <class KeyT, class ValueT,
class HashFcn = std::hash<KeyT>,
class EqualFcn = std::equal_to<KeyT>,
class Allocator = std::allocator<char>,
class ProbeFcn = AtomicHashArrayLinearProbeFcn,
class KeyConvertFcn = detail::AHAIdentity>
template <
class KeyT,
class ValueT,
class HashFcn = std::hash<KeyT>,
class EqualFcn = std::equal_to<KeyT>,
class Allocator = std::allocator<char>,
class ProbeFcn = AtomicHashArrayLinearProbeFcn,
class KeyConvertFcn = Identity>
class AtomicHashMap;

template <class KeyT, class ValueT,
class HashFcn = std::hash<KeyT>,
class EqualFcn = std::equal_to<KeyT>,
class Allocator = std::allocator<char>,
class ProbeFcn = AtomicHashArrayLinearProbeFcn,
class KeyConvertFcn = detail::AHAIdentity>
template <
class KeyT,
class ValueT,
class HashFcn = std::hash<KeyT>,
class EqualFcn = std::equal_to<KeyT>,
class Allocator = std::allocator<char>,
class ProbeFcn = AtomicHashArrayLinearProbeFcn,
class KeyConvertFcn = Identity>
class AtomicHashArray : boost::noncopyable {
static_assert((std::is_convertible<KeyT,int32_t>::value ||
std::is_convertible<KeyT,int64_t>::value ||
std::is_convertible<KeyT,const void*>::value),
"You are trying to use AtomicHashArray with disallowed key "
"types. You must use atomically compare-and-swappable integer "
"keys, or a different container class.");
static_assert(
(std::is_convertible<KeyT, int32_t>::value ||
std::is_convertible<KeyT, int64_t>::value ||
std::is_convertible<KeyT, const void*>::value),
"You are trying to use AtomicHashArray with disallowed key "
"types. You must use atomically compare-and-swappable integer "
"keys, or a different container class.");

public:
typedef KeyT key_type;
typedef ValueT mapped_type;
typedef HashFcn hasher;
typedef EqualFcn key_equal;
typedef KeyConvertFcn key_convert;
typedef KeyT key_type;
typedef ValueT mapped_type;
typedef HashFcn hasher;
typedef EqualFcn key_equal;
typedef KeyConvertFcn key_convert;
typedef std::pair<const KeyT, ValueT> value_type;
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
typedef value_type& reference;
typedef const value_type& const_reference;
typedef value_type* pointer;
typedef const value_type* const_pointer;
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
typedef value_type& reference;
typedef const value_type& const_reference;
typedef value_type* pointer;
typedef const value_type* const_pointer;

const size_t capacity_;
const size_t maxEntries_;
const KeyT kEmptyKey_;
const KeyT kLockedKey_;
const KeyT kErasedKey_;
const size_t capacity_;
const size_t maxEntries_;
const KeyT kEmptyKey_;
const KeyT kLockedKey_;
const KeyT kErasedKey_;

template<class ContT, class IterVal>
template <class ContT, class IterVal>
struct aha_iterator;

typedef aha_iterator<const AtomicHashArray,const value_type> const_iterator;
typedef aha_iterator<AtomicHashArray,value_type> iterator;
typedef aha_iterator<const AtomicHashArray, const value_type> const_iterator;
typedef aha_iterator<AtomicHashArray, value_type> iterator;

// You really shouldn't need this if you use the SmartPtr provided by create,
// but if you really want to do something crazy like stick the released
@ -145,7 +144,7 @@ class AtomicHashArray : boost::noncopyable {
static void destroy(AtomicHashArray*);

private:
const size_t kAnchorMask_;
const size_t kAnchorMask_;

struct Deleter {
void operator()(AtomicHashArray* ptr) {
@ -176,23 +175,23 @@ class AtomicHashArray : boost::noncopyable {
* deleter to make sure everything is cleaned up properly.
*/
struct Config {
KeyT emptyKey;
KeyT lockedKey;
KeyT erasedKey;
KeyT emptyKey;
KeyT lockedKey;
KeyT erasedKey;
double maxLoadFactor;
double growthFactor;
int entryCountThreadCacheSize;
uint32_t entryCountThreadCacheSize;
size_t capacity; // if positive, overrides maxLoadFactor

public:
// Cannot have constexpr ctor because some compilers rightly complain.
Config() : emptyKey((KeyT)-1),
lockedKey((KeyT)-2),
erasedKey((KeyT)-3),
maxLoadFactor(0.8),
growthFactor(-1),
entryCountThreadCacheSize(1000),
capacity(0) {}
Config()
: emptyKey((KeyT)-1),
lockedKey((KeyT)-2),
erasedKey((KeyT)-3),
maxLoadFactor(0.8),
growthFactor(-1),
entryCountThreadCacheSize(1000),
capacity(0) {}
};
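// Illustration: filling in the Config knobs above before calling create().
// A minimal sketch (helper name hypothetical), assuming int64_t keys where
// the three sentinel values can never occur as live keys:
inline AtomicHashArray<int64_t, int64_t>::Config sampleConfig() {
  AtomicHashArray<int64_t, int64_t>::Config cfg;
  cfg.maxLoadFactor = 0.75; // stop inserting before probe chains degrade
  cfg.entryCountThreadCacheSize = 1000; // per-thread counter batching
  return cfg; // emptyKey/lockedKey/erasedKey keep their -1/-2/-3 defaults
}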

// Cannot have pre-instantiated const Config instance because of SIOF.
@ -215,20 +214,22 @@ class AtomicHashArray : boost::noncopyable {
*
* See folly/test/ArrayHashArrayTest.cpp for sample usage.
*/
template <typename LookupKeyT = key_type,
typename LookupHashFcn = hasher,
typename LookupEqualFcn = key_equal>
template <
typename LookupKeyT = key_type,
typename LookupHashFcn = hasher,
typename LookupEqualFcn = key_equal>
iterator find(LookupKeyT k) {
return iterator(this,
findInternal<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k).idx);
return iterator(
this, findInternal<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k).idx);
}

template <typename LookupKeyT = key_type,
typename LookupHashFcn = hasher,
typename LookupEqualFcn = key_equal>
template <
typename LookupKeyT = key_type,
typename LookupHashFcn = hasher,
typename LookupEqualFcn = key_equal>
const_iterator find(LookupKeyT k) const {
return const_cast<AtomicHashArray*>(this)->
find<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k);
return const_cast<AtomicHashArray*>(this)
->find<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k);
}

/*
@ -242,10 +243,10 @@ class AtomicHashArray : boost::noncopyable {
* and success is set false. On collisions, success is set false, but the
* iterator is set to the existing entry.
*/
std::pair<iterator,bool> insert(const value_type& r) {
std::pair<iterator, bool> insert(const value_type& r) {
return emplace(r.first, r.second);
}
std::pair<iterator,bool> insert(value_type&& r) {
std::pair<iterator, bool> insert(value_type&& r) {
return emplace(r.first, std::move(r.second));
}

@ -260,18 +261,18 @@ class AtomicHashArray : boost::noncopyable {
* equal key is already present, this method converts 'key_in' to a key of
* type KeyT using the provided LookupKeyToKeyFcn.
*/
template <typename LookupKeyT = key_type,
typename LookupHashFcn = hasher,
typename LookupEqualFcn = key_equal,
typename LookupKeyToKeyFcn = key_convert,
typename... ArgTs>
std::pair<iterator,bool> emplace(LookupKeyT key_in, ArgTs&&... vCtorArgs) {
SimpleRetT ret = insertInternal<LookupKeyT,
LookupHashFcn,
LookupEqualFcn,
LookupKeyToKeyFcn>(
key_in,
std::forward<ArgTs>(vCtorArgs)...);
template <
typename LookupKeyT = key_type,
typename LookupHashFcn = hasher,
typename LookupEqualFcn = key_equal,
typename LookupKeyToKeyFcn = key_convert,
typename... ArgTs>
std::pair<iterator, bool> emplace(LookupKeyT key_in, ArgTs&&... vCtorArgs) {
SimpleRetT ret = insertInternal<
LookupKeyT,
LookupHashFcn,
LookupEqualFcn,
LookupKeyToKeyFcn>(key_in, std::forward<ArgTs>(vCtorArgs)...);
return std::make_pair(iterator(this, ret.idx), ret.success);
}

@ -285,11 +286,12 @@ class AtomicHashArray : boost::noncopyable {
// Exact number of elements in the map - note that readFull() acquires a
// mutex. See folly/ThreadCachedInt.h for more details.
size_t size() const {
return numEntries_.readFull() -
numErases_.load(std::memory_order_relaxed);
return numEntries_.readFull() - numErases_.load(std::memory_order_relaxed);
}

bool empty() const { return size() == 0; }
bool empty() const {
return size() == 0;
}

iterator begin() {
iterator it(this, 0);
@ -302,8 +304,12 @@ class AtomicHashArray : boost::noncopyable {
return it;
}

iterator end() { return iterator(this, capacity_); }
const_iterator end() const { return const_iterator(this, capacity_); }
iterator end() {
return iterator(this, capacity_);
}
const_iterator end() const {
return const_iterator(this, capacity_);
}

// See AtomicHashMap::findAt - access elements directly
// WARNING: The following 2 functions will fail silently for hashtable
@ -316,50 +322,57 @@ class AtomicHashArray : boost::noncopyable {
return const_cast<AtomicHashArray*>(this)->findAt(idx);
}

iterator makeIter(size_t idx) { return iterator(this, idx); }
iterator makeIter(size_t idx) {
return iterator(this, idx);
}
const_iterator makeIter(size_t idx) const {
return const_iterator(this, idx);
}

// The max load factor allowed for this map
double maxLoadFactor() const { return ((double) maxEntries_) / capacity_; }
double maxLoadFactor() const {
return ((double)maxEntries_) / capacity_;
}

void setEntryCountThreadCacheSize(uint32_t newSize) {
numEntries_.setCacheSize(newSize);
numPendingEntries_.setCacheSize(newSize);
}

int getEntryCountThreadCacheSize() const {
uint32_t getEntryCountThreadCacheSize() const {
return numEntries_.getCacheSize();
}

/* Private data and helper functions... */

private:
friend class AtomicHashMap<KeyT,
ValueT,
HashFcn,
EqualFcn,
Allocator,
ProbeFcn>;
friend class AtomicHashMap<
KeyT,
ValueT,
HashFcn,
EqualFcn,
Allocator,
ProbeFcn>;

struct SimpleRetT { size_t idx; bool success;
struct SimpleRetT {
size_t idx;
bool success;
SimpleRetT(size_t i, bool s) : idx(i), success(s) {}
SimpleRetT() = default;
};


template <typename LookupKeyT = key_type,
typename LookupHashFcn = hasher,
typename LookupEqualFcn = key_equal,
typename LookupKeyToKeyFcn = detail::AHAIdentity,
typename... ArgTs>
template <
typename LookupKeyT = key_type,
typename LookupHashFcn = hasher,
typename LookupEqualFcn = key_equal,
typename LookupKeyToKeyFcn = Identity,
typename... ArgTs>
SimpleRetT insertInternal(LookupKeyT key, ArgTs&&... vCtorArgs);

template <typename LookupKeyT = key_type,
typename LookupHashFcn = hasher,
typename LookupEqualFcn = key_equal>
template <
typename LookupKeyT = key_type,
typename LookupHashFcn = hasher,
typename LookupEqualFcn = key_equal>
SimpleRetT findInternal(const LookupKeyT key);

template <typename MaybeKeyT>
@ -371,10 +384,10 @@ friend class AtomicHashMap<KeyT,
// We need some illegal casting here in order to actually store
// our value_type as a std::pair<const,>. But a little bit of
// undefined behavior never hurt anyone ...
static_assert(sizeof(std::atomic<KeyT>) == sizeof(KeyT),
"std::atomic is implemented in an unexpected way for AHM");
return
const_cast<std::atomic<KeyT>*>(
static_assert(
sizeof(std::atomic<KeyT>) == sizeof(KeyT),
"std::atomic is implemented in an unexpected way for AHM");
return const_cast<std::atomic<KeyT>*>(
reinterpret_cast<std::atomic<KeyT> const*>(&r.first));
}

@ -392,17 +405,22 @@ friend class AtomicHashMap<KeyT,
// reading the value, so be careful of calling size() too frequently. This
// increases insertion throughput several times over while keeping the count
// accurate.
ThreadCachedInt<uint64_t> numEntries_; // Successful key inserts
ThreadCachedInt<uint64_t> numEntries_; // Successful key inserts
ThreadCachedInt<uint64_t> numPendingEntries_; // Used by insertInternal
std::atomic<int64_t> isFull_; // Used by insertInternal
std::atomic<int64_t> numErases_; // Successful key erases
std::atomic<int64_t> numErases_; // Successful key erases

value_type cells_[0]; // This must be the last field of this class
value_type cells_[0]; // This must be the last field of this class

// Force constructor/destructor private since create/destroy should be
// used externally instead
AtomicHashArray(size_t capacity, KeyT emptyKey, KeyT lockedKey,
KeyT erasedKey, double maxLoadFactor, size_t cacheSize);
AtomicHashArray(
size_t capacity,
KeyT emptyKey,
KeyT lockedKey,
KeyT erasedKey,
double maxLoadFactor,
uint32_t cacheSize);

~AtomicHashArray() = default;

@ -412,8 +430,8 @@ friend class AtomicHashMap<KeyT,

inline bool tryLockCell(value_type* const cell) {
KeyT expect = kEmptyKey_;
return cellKeyPtr(*cell)->compare_exchange_strong(expect, kLockedKey_,
std::memory_order_acq_rel);
return cellKeyPtr(*cell)->compare_exchange_strong(
expect, kLockedKey_, std::memory_order_acq_rel);
}
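// Illustration: the acquire/release CAS idiom used by tryLockCell above, in
// isolation. A minimal sketch (names hypothetical), assuming kEmpty and
// kLocked are reserved sentinel values as in this class:
inline bool tryClaimSlot(std::atomic<int64_t>& slot, int64_t kEmpty, int64_t kLocked) {
  int64_t expect = kEmpty;
  // Exactly one thread wins; losers observe kLocked or the real key.
  return slot.compare_exchange_strong(expect, kLocked, std::memory_order_acq_rel);
}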

template <class LookupKeyT = key_type, class LookupHashFcn = hasher>
@ -423,7 +441,6 @@ friend class AtomicHashMap<KeyT,
return LIKELY(probe < capacity_) ? probe : hashVal % capacity_;
}


}; // AtomicHashArray

} // namespace folly
626 ios/Pods/Folly/folly/AtomicHashMap-inl.h generated
@ -1,5 +1,5 @@
/*
* Copyright 2016 Facebook, Inc.
* Copyright 2012-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -24,89 +24,124 @@ namespace folly {

// AtomicHashMap constructor -- Atomic wrapper that allows growth
// This class has a lot of overhead (184 Bytes) so only use for big maps
template <typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
AtomicHashMap(size_t finalSizeEst, const Config& config)
: kGrowthFrac_(config.growthFactor < 0 ?
1.0 - config.maxLoadFactor : config.growthFactor) {
CHECK(config.maxLoadFactor > 0.0 && config.maxLoadFactor < 1.0);
subMaps_[0].store(SubMap::create(finalSizeEst, config).release(),
std::memory_order_relaxed);
template <
typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
AtomicHashMap<
KeyT,
ValueT,
HashFcn,
EqualFcn,
Allocator,
ProbeFcn,
KeyConvertFcn>::AtomicHashMap(size_t finalSizeEst, const Config& config)
: kGrowthFrac_(
config.growthFactor < 0 ? 1.0f - config.maxLoadFactor
: config.growthFactor) {
CHECK(config.maxLoadFactor > 0.0f && config.maxLoadFactor < 1.0f);
subMaps_[0].store(
SubMap::create(finalSizeEst, config).release(),
std::memory_order_relaxed);
auto subMapCount = kNumSubMaps_;
FOR_EACH_RANGE(i, 1, subMapCount) {
FOR_EACH_RANGE (i, 1, subMapCount) {
subMaps_[i].store(nullptr, std::memory_order_relaxed);
}
numMapsAllocated_.store(1, std::memory_order_relaxed);
}
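// Illustration: what the kGrowthFrac_ initializer above yields with the
// default Config (growthFactor = -1, maxLoadFactor = 0.8). A minimal sketch:
inline float defaultGrowthFrac(float growthFactor, float maxLoadFactor) {
  // growthFactor < 0 selects 1 - maxLoadFactor, i.e. 0.2f by default, so
  // each overflow submap starts at roughly 20% of the cells allocated so far.
  return growthFactor < 0 ? 1.0f - maxLoadFactor : growthFactor;
}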

// emplace --
template <typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
template <typename LookupKeyT,
typename LookupHashFcn,
typename LookupEqualFcn,
typename LookupKeyToKeyFcn,
typename... ArgTs>
std::pair<typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator,
ProbeFcn, KeyConvertFcn>::iterator, bool>
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
emplace(LookupKeyT k, ArgTs&&... vCtorArgs) {
SimpleRetT ret = insertInternal<LookupKeyT,
LookupHashFcn,
LookupEqualFcn,
LookupKeyToKeyFcn>(
k, std::forward<ArgTs>(vCtorArgs)...);
template <
typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
template <
typename LookupKeyT,
typename LookupHashFcn,
typename LookupEqualFcn,
typename LookupKeyToKeyFcn,
typename... ArgTs>
std::pair<
typename AtomicHashMap<
KeyT,
ValueT,
HashFcn,
EqualFcn,
Allocator,
ProbeFcn,
KeyConvertFcn>::iterator,
bool>
AtomicHashMap<
KeyT,
ValueT,
HashFcn,
EqualFcn,
Allocator,
ProbeFcn,
KeyConvertFcn>::emplace(LookupKeyT k, ArgTs&&... vCtorArgs) {
SimpleRetT ret = insertInternal<
LookupKeyT,
LookupHashFcn,
LookupEqualFcn,
LookupKeyToKeyFcn>(k, std::forward<ArgTs>(vCtorArgs)...);
SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
return std::make_pair(iterator(this, ret.i, subMap->makeIter(ret.j)),
ret.success);
return std::make_pair(
iterator(this, ret.i, subMap->makeIter(ret.j)), ret.success);
}

// insertInternal -- Allocates new sub maps as existing ones fill up.
template <typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
template <typename LookupKeyT,
typename LookupHashFcn,
typename LookupEqualFcn,
typename LookupKeyToKeyFcn,
typename... ArgTs>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
SimpleRetT
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
insertInternal(LookupKeyT key, ArgTs&&... vCtorArgs) {
beginInsertInternal:
template <
typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
template <
typename LookupKeyT,
typename LookupHashFcn,
typename LookupEqualFcn,
typename LookupKeyToKeyFcn,
typename... ArgTs>
typename AtomicHashMap<
KeyT,
ValueT,
HashFcn,
EqualFcn,
Allocator,
ProbeFcn,
KeyConvertFcn>::SimpleRetT
AtomicHashMap<
KeyT,
ValueT,
HashFcn,
EqualFcn,
Allocator,
ProbeFcn,
KeyConvertFcn>::insertInternal(LookupKeyT key, ArgTs&&... vCtorArgs) {
beginInsertInternal:
auto nextMapIdx = // this maintains our state
numMapsAllocated_.load(std::memory_order_acquire);
numMapsAllocated_.load(std::memory_order_acquire);
typename SubMap::SimpleRetT ret;
FOR_EACH_RANGE(i, 0, nextMapIdx) {
FOR_EACH_RANGE (i, 0, nextMapIdx) {
// insert in each map successively. If one succeeds, we're done!
SubMap* subMap = subMaps_[i].load(std::memory_order_relaxed);
ret = subMap->template insertInternal<LookupKeyT,
LookupHashFcn,
LookupEqualFcn,
LookupKeyToKeyFcn>(
key, std::forward<ArgTs>(vCtorArgs)...);
ret = subMap->template insertInternal<
LookupKeyT,
LookupHashFcn,
LookupEqualFcn,
LookupKeyToKeyFcn>(key, std::forward<ArgTs>(vCtorArgs)...);
if (ret.idx == subMap->capacity_) {
continue; //map is full, so try the next one
continue; // map is full, so try the next one
}
// Either collision or success - insert in either case
return SimpleRetT(i, ret.idx, ret.success);
@ -125,12 +160,13 @@ insertInternal(LookupKeyT key, ArgTs&&... vCtorArgs) {
if (tryLockMap(nextMapIdx)) {
// Alloc a new map and shove it in. We can change whatever
// we want because other threads are waiting on us...
size_t numCellsAllocated = (size_t)
(primarySubMap->capacity_ *
std::pow(1.0 + kGrowthFrac_, nextMapIdx - 1));
size_t newSize = (int) (numCellsAllocated * kGrowthFrac_);
DCHECK(subMaps_[nextMapIdx].load(std::memory_order_relaxed) ==
(SubMap*)kLockedPtr_);
size_t numCellsAllocated = (size_t)(
primarySubMap->capacity_ *
std::pow(1.0 + kGrowthFrac_, nextMapIdx - 1));
size_t newSize = size_t(numCellsAllocated * kGrowthFrac_);
DCHECK(
subMaps_[nextMapIdx].load(std::memory_order_relaxed) ==
(SubMap*)kLockedPtr_);
// create a new map using the settings stored in the first map

Config config;
@ -139,14 +175,14 @@ insertInternal(LookupKeyT key, ArgTs&&... vCtorArgs) {
config.erasedKey = primarySubMap->kErasedKey_;
config.maxLoadFactor = primarySubMap->maxLoadFactor();
config.entryCountThreadCacheSize =
primarySubMap->getEntryCountThreadCacheSize();
subMaps_[nextMapIdx].store(SubMap::create(newSize, config).release(),
std::memory_order_relaxed);
primarySubMap->getEntryCountThreadCacheSize();
subMaps_[nextMapIdx].store(
SubMap::create(newSize, config).release(), std::memory_order_relaxed);

// Publish the map to other threads.
numMapsAllocated_.fetch_add(1, std::memory_order_release);
DCHECK_EQ(nextMapIdx + 1,
numMapsAllocated_.load(std::memory_order_relaxed));
DCHECK_EQ(
nextMapIdx + 1, numMapsAllocated_.load(std::memory_order_relaxed));
} else {
// If we lost the race, we'll have to wait for the next map to get
// allocated before doing any insertion here.
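// Illustration: the allocate-or-wait handshake above, reduced to one slot.
// A minimal sketch (names hypothetical); the winner builds the submap and
// publishes it with a release increment that waiting threads acquire:
inline bool tryPublishSubMap(std::atomic<void*>& slot, void* lockedSentinel,
                             void* builtMap, std::atomic<uint32_t>& published) {
  void* expected = nullptr;
  if (!slot.compare_exchange_strong(expected, lockedSentinel,
                                    std::memory_order_acquire)) {
    return false; // lost the race; spin on `published`, then retry the insert
  }
  slot.store(builtMap, std::memory_order_relaxed);
  published.fetch_add(1, std::memory_order_release); // map now visible
  return true;
}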
@ -169,20 +205,31 @@ insertInternal(LookupKeyT key, ArgTs&&... vCtorArgs) {
}

// find --
template <typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
template <
typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
template <class LookupKeyT, class LookupHashFcn, class LookupEqualFcn>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
iterator
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::find(
LookupKeyT k) {
typename AtomicHashMap<
KeyT,
ValueT,
HashFcn,
EqualFcn,
Allocator,
ProbeFcn,
KeyConvertFcn>::iterator
AtomicHashMap<
KeyT,
ValueT,
HashFcn,
EqualFcn,
Allocator,
ProbeFcn,
KeyConvertFcn>::find(LookupKeyT k) {
SimpleRetT ret = findInternal<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k);
if (!ret.success) {
return end();
@ -191,54 +238,77 @@ AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
return iterator(this, ret.i, subMap->makeIter(ret.j));
}

template <typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
template <
typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
template <class LookupKeyT, class LookupHashFcn, class LookupEqualFcn>
typename AtomicHashMap<KeyT, ValueT,
HashFcn, EqualFcn, Allocator, ProbeFcn, KeyConvertFcn>::const_iterator
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
find(LookupKeyT k) const {
return const_cast<AtomicHashMap*>(this)->find<LookupKeyT,
LookupHashFcn,
LookupEqualFcn>(k);
typename AtomicHashMap<
KeyT,
ValueT,
HashFcn,
EqualFcn,
Allocator,
ProbeFcn,
KeyConvertFcn>::const_iterator
AtomicHashMap<
KeyT,
ValueT,
HashFcn,
EqualFcn,
Allocator,
ProbeFcn,
KeyConvertFcn>::find(LookupKeyT k) const {
return const_cast<AtomicHashMap*>(this)
->find<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k);
}

// findInternal --
template <typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
template <
typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
template <class LookupKeyT, class LookupHashFcn, class LookupEqualFcn>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
SimpleRetT
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
findInternal(const LookupKeyT k) const {
typename AtomicHashMap<
KeyT,
ValueT,
HashFcn,
EqualFcn,
Allocator,
ProbeFcn,
KeyConvertFcn>::SimpleRetT
AtomicHashMap<
KeyT,
ValueT,
HashFcn,
EqualFcn,
Allocator,
ProbeFcn,
KeyConvertFcn>::findInternal(const LookupKeyT k) const {
SubMap* const primaryMap = subMaps_[0].load(std::memory_order_relaxed);
typename SubMap::SimpleRetT ret =
primaryMap->template findInternal<LookupKeyT,
LookupHashFcn,
LookupEqualFcn>(k);
primaryMap
->template findInternal<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k);
if (LIKELY(ret.idx != primaryMap->capacity_)) {
return SimpleRetT(0, ret.idx, ret.success);
}
int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
FOR_EACH_RANGE(i, 1, numMaps) {
const unsigned int numMaps =
numMapsAllocated_.load(std::memory_order_acquire);
FOR_EACH_RANGE (i, 1, numMaps) {
// Check each map successively. If one succeeds, we're done!
SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
ret = thisMap->template findInternal<LookupKeyT,
LookupHashFcn,
LookupEqualFcn>(k);
ret =
thisMap
->template findInternal<LookupKeyT, LookupHashFcn, LookupEqualFcn>(
k);
if (LIKELY(ret.idx != thisMap->capacity_)) {
return SimpleRetT(i, ret.idx, ret.success);
}
@ -248,23 +318,34 @@ AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
}

// findAtInternal -- see encodeIndex() for details.
template <typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
SimpleRetT
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
findAtInternal(uint32_t idx) const {
template <
typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
typename AtomicHashMap<
KeyT,
ValueT,
HashFcn,
EqualFcn,
Allocator,
ProbeFcn,
KeyConvertFcn>::SimpleRetT
AtomicHashMap<
KeyT,
ValueT,
HashFcn,
EqualFcn,
Allocator,
ProbeFcn,
KeyConvertFcn>::findAtInternal(uint32_t idx) const {
uint32_t subMapIdx, subMapOffset;
if (idx & kSecondaryMapBit_) {
// idx falls in a secondary map
idx &= ~kSecondaryMapBit_; // unset secondary bit
idx &= ~kSecondaryMapBit_; // unset secondary bit
subMapIdx = idx >> kSubMapIndexShift_;
DCHECK_LT(subMapIdx, numMapsAllocated_.load(std::memory_order_relaxed));
subMapOffset = idx & kSubMapIndexMask_;
@ -277,21 +358,32 @@ findAtInternal(uint32_t idx) const {
}

// erase --
template <typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
size_type
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
erase(const KeyT k) {
template <
typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
typename AtomicHashMap<
KeyT,
ValueT,
HashFcn,
EqualFcn,
Allocator,
ProbeFcn,
KeyConvertFcn>::size_type
AtomicHashMap<
KeyT,
ValueT,
HashFcn,
EqualFcn,
Allocator,
ProbeFcn,
KeyConvertFcn>::erase(const KeyT k) {
int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
FOR_EACH_RANGE(i, 0, numMaps) {
FOR_EACH_RANGE (i, 0, numMaps) {
// Check each map successively. If one succeeds, we're done!
if (subMaps_[i].load(std::memory_order_relaxed)->erase(k)) {
return 1;
@ -302,19 +394,25 @@ erase(const KeyT k) {
}

// capacity -- summation of capacities of all submaps
template <typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
capacity() const {
template <
typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
size_t AtomicHashMap<
KeyT,
ValueT,
HashFcn,
EqualFcn,
Allocator,
ProbeFcn,
KeyConvertFcn>::capacity() const {
size_t totalCap(0);
int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
FOR_EACH_RANGE(i, 0, numMaps) {
FOR_EACH_RANGE (i, 0, numMaps) {
totalCap += subMaps_[i].load(std::memory_order_relaxed)->capacity_;
}
return totalCap;
@ -322,44 +420,53 @@ capacity() const {

// spaceRemaining --
// number of new insertions until current submaps are all at max load
template <typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
spaceRemaining() const {
template <
typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
size_t AtomicHashMap<
KeyT,
ValueT,
HashFcn,
EqualFcn,
Allocator,
ProbeFcn,
KeyConvertFcn>::spaceRemaining() const {
size_t spaceRem(0);
int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
FOR_EACH_RANGE(i, 0, numMaps) {
FOR_EACH_RANGE (i, 0, numMaps) {
SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
spaceRem += std::max(
0,
thisMap->maxEntries_ - &thisMap->numEntries_.readFull()
);
spaceRem +=
std::max(0, thisMap->maxEntries_ - &thisMap->numEntries_.readFull());
}
return spaceRem;
}

// clear -- Wipes all keys and values from primary map and destroys
// all secondary maps. Not thread safe.
template <typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
void AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
clear() {
template <
typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
void AtomicHashMap<
KeyT,
ValueT,
HashFcn,
EqualFcn,
Allocator,
ProbeFcn,
KeyConvertFcn>::clear() {
subMaps_[0].load(std::memory_order_relaxed)->clear();
int const numMaps = numMapsAllocated_
.load(std::memory_order_relaxed);
FOR_EACH_RANGE(i, 1, numMaps) {
int const numMaps = numMapsAllocated_.load(std::memory_order_relaxed);
FOR_EACH_RANGE (i, 1, numMaps) {
SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
DCHECK(thisMap);
SubMap::destroy(thisMap);
@ -369,19 +476,25 @@ clear() {
}

// size --
template <typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
size() const {
template <
typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
size_t AtomicHashMap<
KeyT,
ValueT,
HashFcn,
EqualFcn,
Allocator,
ProbeFcn,
KeyConvertFcn>::size() const {
size_t totalSize(0);
int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
FOR_EACH_RANGE(i, 0, numMaps) {
FOR_EACH_RANGE (i, 0, numMaps) {
totalSize += subMaps_[i].load(std::memory_order_relaxed)->size();
}
return totalSize;
@ -405,19 +518,26 @@ size() const {
// 31 1
// 27-30 which subMap
// 0-26 subMap offset (index_ret input)
template <typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
inline uint32_t
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
encodeIndex(uint32_t subMap, uint32_t offset) {
DCHECK_EQ(offset & kSecondaryMapBit_, 0); // offset can't be too big
if (subMap == 0) return offset;
template <
typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
inline uint32_t AtomicHashMap<
KeyT,
ValueT,
HashFcn,
EqualFcn,
Allocator,
ProbeFcn,
KeyConvertFcn>::encodeIndex(uint32_t subMap, uint32_t offset) {
DCHECK_EQ(offset & kSecondaryMapBit_, 0); // offset can't be too big
if (subMap == 0) {
return offset;
}
// Make sure subMap isn't too big
DCHECK_EQ(subMap >> kNumSubMapBits_, 0);
// Make sure subMap bits of offset are clear
@ -427,35 +547,40 @@ AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
return offset | (subMap << kSubMapIndexShift_) | kSecondaryMapBit_;
}
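// Illustration: the bit layout above on concrete numbers. A minimal worked
// example with kNumSubMapBits_ = 4, so kSubMapIndexShift_ = 27 and
// kSecondaryMapBit_ = 1u << 31:
//   encodeIndex(0, 5) == 5                            // primary map: offset only
//   encodeIndex(2, 5) == 5 | (2u << 27) | (1u << 31)  // == 0x90000005
// findAtInternal() reverses this: test bit 31, take the subMap from bits
// 27-30, and mask the low 27 bits for the offset.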

// Iterator implementation

template <typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
template <
typename KeyT,
typename ValueT,
typename HashFcn,
typename EqualFcn,
typename Allocator,
typename ProbeFcn,
typename KeyConvertFcn>
template <class ContT, class IterVal, class SubIt>
struct AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
ahm_iterator : boost::iterator_facade<ahm_iterator<ContT, IterVal, SubIt>,
IterVal,
boost::forward_traversal_tag> {
explicit ahm_iterator() : ahm_(0) {}
struct AtomicHashMap<
KeyT,
ValueT,
HashFcn,
EqualFcn,
Allocator,
ProbeFcn,
KeyConvertFcn>::ahm_iterator
: boost::iterator_facade<
ahm_iterator<ContT, IterVal, SubIt>,
IterVal,
boost::forward_traversal_tag> {
explicit ahm_iterator() : ahm_(nullptr) {}

// Conversion ctor for interoperability between const_iterator and
// iterator. The enable_if<> magic keeps us well-behaved for
// is_convertible<> (v. the iterator_facade documentation).
template<class OtherContT, class OtherVal, class OtherSubIt>
ahm_iterator(const ahm_iterator<OtherContT,OtherVal,OtherSubIt>& o,
typename std::enable_if<
std::is_convertible<OtherSubIt,SubIt>::value >::type* = 0)
: ahm_(o.ahm_)
, subMap_(o.subMap_)
, subIt_(o.subIt_)
{}
template <class OtherContT, class OtherVal, class OtherSubIt>
ahm_iterator(
const ahm_iterator<OtherContT, OtherVal, OtherSubIt>& o,
typename std::enable_if<
std::is_convertible<OtherSubIt, SubIt>::value>::type* = nullptr)
: ahm_(o.ahm_), subMap_(o.subMap_), subIt_(o.subIt_) {}

/*
* Returns the unique index that can be used for access directly
@ -468,13 +593,8 @@ struct AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,

private:
friend class AtomicHashMap;
explicit ahm_iterator(ContT* ahm,
uint32_t subMap,
const SubIt& subIt)
: ahm_(ahm)
, subMap_(subMap)
, subIt_(subIt)
{}
explicit ahm_iterator(ContT* ahm, uint32_t subMap, const SubIt& subIt)
: ahm_(ahm), subMap_(subMap), subIt_(subIt) {}

friend class boost::iterator_core_access;

@ -493,23 +613,23 @@ struct AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
return isEnd() == other.isEnd();
}

return subMap_ == other.subMap_ &&
subIt_ == other.subIt_;
return subMap_ == other.subMap_ && subIt_ == other.subIt_;
}

IterVal& dereference() const {
return *subIt_;
}

bool isEnd() const { return ahm_ == nullptr; }
bool isEnd() const {
return ahm_ == nullptr;
}

void checkAdvanceToNextSubmap() {
if (isEnd()) {
return;
}

SubMap* thisMap = ahm_->subMaps_[subMap_].
load(std::memory_order_relaxed);
SubMap* thisMap = ahm_->subMaps_[subMap_].load(std::memory_order_relaxed);
while (subIt_ == thisMap->end()) {
// This sub iterator is done, advance to next one
if (subMap_ + 1 <
224 ios/Pods/Folly/folly/AtomicHashMap.h generated
@ -1,5 +1,5 @@
/*
* Copyright 2016 Facebook, Inc.
* Copyright 2012-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -13,7 +13,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

/*
* AtomicHashMap --
*
@ -86,15 +85,16 @@
#include <boost/noncopyable.hpp>
#include <boost/type_traits/is_convertible.hpp>

#include <stdexcept>
#include <functional>
#include <atomic>
#include <functional>
#include <stdexcept>

#include <folly/AtomicHashArray.h>
#include <folly/Foreach.h>
#include <folly/Hash.h>
#include <folly/CPortability.h>
#include <folly/Likely.h>
#include <folly/ThreadCachedInt.h>
#include <folly/container/Foreach.h>
#include <folly/hash/Hash.h>

namespace folly {

@ -149,47 +149,57 @@ namespace folly {

// Thrown when insertion fails due to running out of space for
// submaps.
struct AtomicHashMapFullError : std::runtime_error {
struct FOLLY_EXPORT AtomicHashMapFullError : std::runtime_error {
explicit AtomicHashMapFullError()
: std::runtime_error("AtomicHashMap is full")
{}
: std::runtime_error("AtomicHashMap is full") {}
};

template<class KeyT, class ValueT, class HashFcn, class EqualFcn,
class Allocator, class ProbeFcn, class KeyConvertFcn>
template <
class KeyT,
class ValueT,
class HashFcn,
class EqualFcn,
class Allocator,
class ProbeFcn,
class KeyConvertFcn>
class AtomicHashMap : boost::noncopyable {
typedef AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>
SubMap;
typedef AtomicHashArray<
KeyT,
ValueT,
HashFcn,
EqualFcn,
Allocator,
ProbeFcn,
KeyConvertFcn>
SubMap;

public:
typedef KeyT key_type;
typedef ValueT mapped_type;
typedef KeyT key_type;
typedef ValueT mapped_type;
typedef std::pair<const KeyT, ValueT> value_type;
typedef HashFcn hasher;
typedef EqualFcn key_equal;
typedef KeyConvertFcn key_convert;
typedef value_type* pointer;
typedef value_type& reference;
typedef const value_type& const_reference;
typedef std::ptrdiff_t difference_type;
typedef std::size_t size_type;
typedef HashFcn hasher;
typedef EqualFcn key_equal;
typedef KeyConvertFcn key_convert;
typedef value_type* pointer;
typedef value_type& reference;
typedef const value_type& const_reference;
typedef std::ptrdiff_t difference_type;
typedef std::size_t size_type;
typedef typename SubMap::Config Config;

template<class ContT, class IterVal, class SubIt>
template <class ContT, class IterVal, class SubIt>
struct ahm_iterator;

typedef ahm_iterator<const AtomicHashMap,
const value_type,
typename SubMap::const_iterator>
const_iterator;
typedef ahm_iterator<AtomicHashMap,
value_type,
typename SubMap::iterator>
iterator;
typedef ahm_iterator<
const AtomicHashMap,
const value_type,
typename SubMap::const_iterator>
const_iterator;
typedef ahm_iterator<AtomicHashMap, value_type, typename SubMap::iterator>
iterator;

public:
const float kGrowthFrac_; // How much to grow when we run out of capacity.
const float kGrowthFrac_; // How much to grow when we run out of capacity.

// The constructor takes a finalSizeEst which is the optimal
// number of elements to maximize space utilization and performance,
@ -197,7 +207,8 @@ typedef AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
explicit AtomicHashMap(size_t finalSizeEst, const Config& c = Config());

~AtomicHashMap() {
const int numMaps = numMapsAllocated_.load(std::memory_order_relaxed);
const unsigned int numMaps =
numMapsAllocated_.load(std::memory_order_relaxed);
FOR_EACH_RANGE (i, 0, numMaps) {
SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
DCHECK(thisMap);
@ -205,8 +216,12 @@ typedef AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
}
}

key_equal key_eq() const { return key_equal(); }
hasher hash_function() const { return hasher(); }
key_equal key_eq() const {
return key_equal();
}
hasher hash_function() const {
return hasher();
}

/*
* insert --
@ -223,16 +238,16 @@ typedef AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
* all sub maps are full, no element is inserted, and
* AtomicHashMapFullError is thrown.
*/
std::pair<iterator,bool> insert(const value_type& r) {
std::pair<iterator, bool> insert(const value_type& r) {
return emplace(r.first, r.second);
}
std::pair<iterator,bool> insert(key_type k, const mapped_type& v) {
std::pair<iterator, bool> insert(key_type k, const mapped_type& v) {
return emplace(k, v);
}
std::pair<iterator,bool> insert(value_type&& r) {
std::pair<iterator, bool> insert(value_type&& r) {
return emplace(r.first, std::move(r.second));
}
std::pair<iterator,bool> insert(key_type k, mapped_type&& v) {
std::pair<iterator, bool> insert(key_type k, mapped_type&& v) {
return emplace(k, std::move(v));
}
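// Illustration: the insert() contract described above, from the caller's
// side. A minimal sketch (helper name and sizes illustrative), assuming
// int64_t keys compatible with the default Config sentinels:
inline int64_t sampleInsert() {
  AtomicHashMap<int64_t, int64_t> m(4096); // finalSizeEst, default Config
  auto ret = m.insert(int64_t(10), int64_t(1));
  // ret.second is false on a duplicate key, but ret.first still points at
  // the existing entry, so the caller can read or overwrite the value.
  return ret.first->second;
}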

@ -247,12 +262,13 @@ typedef AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
* equal key is already present, this method converts 'key_in' to a key of
* type KeyT using the provided LookupKeyToKeyFcn.
*/
template <typename LookupKeyT = key_type,
typename LookupHashFcn = hasher,
typename LookupEqualFcn = key_equal,
typename LookupKeyToKeyFcn = key_convert,
typename... ArgTs>
std::pair<iterator,bool> emplace(LookupKeyT k, ArgTs&&... vCtorArg);
template <
typename LookupKeyT = key_type,
typename LookupHashFcn = hasher,
typename LookupEqualFcn = key_equal,
typename LookupKeyToKeyFcn = key_convert,
typename... ArgTs>
std::pair<iterator, bool> emplace(LookupKeyT k, ArgTs&&... vCtorArg);

/*
* find --
@ -270,14 +286,16 @@ typedef AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
*
* See folly/test/ArrayHashMapTest.cpp for sample usage.
*/
template <typename LookupKeyT = key_type,
typename LookupHashFcn = hasher,
typename LookupEqualFcn = key_equal>
template <
typename LookupKeyT = key_type,
typename LookupHashFcn = hasher,
typename LookupEqualFcn = key_equal>
iterator find(LookupKeyT k);

template <typename LookupKeyT = key_type,
typename LookupHashFcn = hasher,
typename LookupEqualFcn = key_equal>
template <
typename LookupKeyT = key_type,
typename LookupHashFcn = hasher,
typename LookupEqualFcn = key_equal>
const_iterator find(LookupKeyT k) const;

/*
@ -309,13 +327,14 @@ typedef AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
*/
size_t size() const;

bool empty() const { return size() == 0; }
bool empty() const {
return size() == 0;
}

size_type count(key_type k) const {
return find(k) == end() ? 0 : 1;
}


/*
* findAt --
*
@ -328,8 +347,10 @@ typedef AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
iterator findAt(uint32_t idx) {
SimpleRetT ret = findAtInternal(idx);
DCHECK_LT(ret.i, numSubMaps());
return iterator(this, ret.i,
subMaps_[ret.i].load(std::memory_order_relaxed)->makeIter(ret.j));
return iterator(
this,
ret.i,
subMaps_[ret.i].load(std::memory_order_relaxed)->makeIter(ret.j));
}
const_iterator findAt(uint32_t idx) const {
return const_cast<AtomicHashMap*>(this)->findAt(idx);
@ -356,15 +377,14 @@ typedef AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
}

iterator begin() {
iterator it(this, 0,
subMaps_[0].load(std::memory_order_relaxed)->begin());
iterator it(this, 0, subMaps_[0].load(std::memory_order_relaxed)->begin());
it.checkAdvanceToNextSubmap();
return it;
}

const_iterator begin() const {
const_iterator it(this, 0,
subMaps_[0].load(std::memory_order_relaxed)->begin());
const_iterator it(
this, 0, subMaps_[0].load(std::memory_order_relaxed)->begin());
it.checkAdvanceToNextSubmap();
return it;
}
@ -380,26 +400,26 @@ typedef AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
/* Advanced functions for direct access: */

inline uint32_t recToIdx(const value_type& r, bool mayInsert = true) {
SimpleRetT ret = mayInsert ?
insertInternal(r.first, r.second) : findInternal(r.first);
SimpleRetT ret =
mayInsert ? insertInternal(r.first, r.second) : findInternal(r.first);
return encodeIndex(ret.i, ret.j);
}

inline uint32_t recToIdx(value_type&& r, bool mayInsert = true) {
SimpleRetT ret = mayInsert ?
insertInternal(r.first, std::move(r.second)) : findInternal(r.first);
SimpleRetT ret = mayInsert ? insertInternal(r.first, std::move(r.second))
: findInternal(r.first);
return encodeIndex(ret.i, ret.j);
}

inline uint32_t recToIdx(key_type k, const mapped_type& v,
bool mayInsert = true) {
inline uint32_t
recToIdx(key_type k, const mapped_type& v, bool mayInsert = true) {
SimpleRetT ret = mayInsert ? insertInternal(k, v) : findInternal(k);
return encodeIndex(ret.i, ret.j);
}

inline uint32_t recToIdx(key_type k, mapped_type&& v, bool mayInsert = true) {
SimpleRetT ret = mayInsert ?
insertInternal(k, std::move(v)) : findInternal(k);
SimpleRetT ret =
mayInsert ? insertInternal(k, std::move(v)) : findInternal(k);
return encodeIndex(ret.i, ret.j);
}

@ -418,28 +438,33 @@ typedef AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
// This limits primary submap size to 2^31 ~= 2 billion, secondary submap
// size to 2^(32 - kNumSubMapBits_ - 1) = 2^27 ~= 130 million, and num subMaps
// to 2^kNumSubMapBits_ = 16.
static const uint32_t kNumSubMapBits_ = 4;
static const uint32_t kSecondaryMapBit_ = 1u << 31; // Highest bit
static const uint32_t kSubMapIndexShift_ = 32 - kNumSubMapBits_ - 1;
static const uint32_t kSubMapIndexMask_ = (1 << kSubMapIndexShift_) - 1;
static const uint32_t kNumSubMaps_ = 1 << kNumSubMapBits_;
static const uintptr_t kLockedPtr_ = 0x88ULL << 48; // invalid pointer
static const uint32_t kNumSubMapBits_ = 4;
static const uint32_t kSecondaryMapBit_ = 1u << 31; // Highest bit
static const uint32_t kSubMapIndexShift_ = 32 - kNumSubMapBits_ - 1;
static const uint32_t kSubMapIndexMask_ = (1 << kSubMapIndexShift_) - 1;
static const uint32_t kNumSubMaps_ = 1 << kNumSubMapBits_;
static const uintptr_t kLockedPtr_ = 0x88ULL << 48; // invalid pointer
|
||||
|
||||
struct SimpleRetT { uint32_t i; size_t j; bool success;
|
||||
struct SimpleRetT {
|
||||
uint32_t i;
|
||||
size_t j;
|
||||
bool success;
|
||||
SimpleRetT(uint32_t ii, size_t jj, bool s) : i(ii), j(jj), success(s) {}
|
||||
SimpleRetT() = default;
|
||||
};
|
||||
|
||||
template <typename LookupKeyT = key_type,
|
||||
typename LookupHashFcn = hasher,
|
||||
typename LookupEqualFcn = key_equal,
|
||||
typename LookupKeyToKeyFcn = key_convert,
|
||||
typename... ArgTs>
|
||||
template <
|
||||
typename LookupKeyT = key_type,
|
||||
typename LookupHashFcn = hasher,
|
||||
typename LookupEqualFcn = key_equal,
|
||||
typename LookupKeyToKeyFcn = key_convert,
|
||||
typename... ArgTs>
|
||||
SimpleRetT insertInternal(LookupKeyT key, ArgTs&&... value);
|
||||
|
||||
template <typename LookupKeyT = key_type,
|
||||
typename LookupHashFcn = hasher,
|
||||
typename LookupEqualFcn = key_equal>
|
||||
template <
|
||||
typename LookupKeyT = key_type,
|
||||
typename LookupHashFcn = hasher,
|
||||
typename LookupEqualFcn = key_equal>
|
||||
SimpleRetT findInternal(const LookupKeyT k) const;
|
||||
|
||||
SimpleRetT findAtInternal(uint32_t idx) const;
|
||||
@ -447,28 +472,29 @@ typedef AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
|
||||
std::atomic<SubMap*> subMaps_[kNumSubMaps_];
|
||||
std::atomic<uint32_t> numMapsAllocated_;
|
||||
|
||||
inline bool tryLockMap(int idx) {
|
||||
inline bool tryLockMap(unsigned int idx) {
|
||||
SubMap* val = nullptr;
|
||||
return subMaps_[idx].compare_exchange_strong(val, (SubMap*)kLockedPtr_,
|
||||
std::memory_order_acquire);
|
||||
return subMaps_[idx].compare_exchange_strong(
|
||||
val, (SubMap*)kLockedPtr_, std::memory_order_acquire);
|
||||
}
|
||||
|
||||
static inline uint32_t encodeIndex(uint32_t subMap, uint32_t subMapIdx);
|
||||
|
||||
}; // AtomicHashMap
|
||||
|
||||
template <class KeyT,
|
||||
class ValueT,
|
||||
class HashFcn = std::hash<KeyT>,
|
||||
class EqualFcn = std::equal_to<KeyT>,
|
||||
class Allocator = std::allocator<char>>
|
||||
using QuadraticProbingAtomicHashMap =
|
||||
AtomicHashMap<KeyT,
|
||||
ValueT,
|
||||
HashFcn,
|
||||
EqualFcn,
|
||||
Allocator,
|
||||
AtomicHashArrayQuadraticProbeFcn>;
|
||||
template <
|
||||
class KeyT,
|
||||
class ValueT,
|
||||
class HashFcn = std::hash<KeyT>,
|
||||
class EqualFcn = std::equal_to<KeyT>,
|
||||
class Allocator = std::allocator<char>>
|
||||
using QuadraticProbingAtomicHashMap = AtomicHashMap<
|
||||
KeyT,
|
||||
ValueT,
|
||||
HashFcn,
|
||||
EqualFcn,
|
||||
Allocator,
|
||||
AtomicHashArrayQuadraticProbeFcn>;
|
||||
} // namespace folly
|
||||
|
||||
#include <folly/AtomicHashMap-inl.h>
|
||||
|
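For orientation, the header above is folly's AtomicHashMap, a concurrent hash map that grows by chaining up to kNumSubMaps_ submaps instead of rehashing, which is why findAt()/recToIdx() can hand out stable uint32_t handles via encodeIndex(). A minimal usage sketch follows; it assumes only that this Pod's Folly headers are on the include path, and the key/value types and size hint are illustrative, not taken from the app:

#include <folly/AtomicHashMap.h>
#include <cstdio>

int main() {
  // The capacity estimate is mandatory up front: the map never rehashes
  // in place; it chains extra submaps when the estimate is exceeded.
  folly::AtomicHashMap<int32_t, int32_t> counts(4096);

  auto ret = counts.insert(std::make_pair(10, 1)); // pair<iterator, bool>
  if (!ret.second) {
    // Key already present. Note: a plain int update like this is not
    // atomic; concurrent counters would use an atomic value type.
    ret.first->second += 1;
  }

  auto it = counts.find(10);
  if (it != counts.end()) {
    std::printf("10 -> %d\n", it->second);
  }
  return 0;
}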
65 ios/Pods/Folly/folly/AtomicIntrusiveLinkedList.h (generated)
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2014-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -18,6 +18,7 @@

#include <atomic>
#include <cassert>
#include <utility>

namespace folly {

@ -88,13 +89,27 @@ class AtomicIntrusiveLinkedList {
       compiler bugs (GCC prior to 4.8.3 (bug 60272), clang (bug 18899),
       MSVC (bug 819819); source:
       http://en.cppreference.com/w/cpp/atomic/atomic/compare_exchange */
    } while (!head_.compare_exchange_weak(oldHead, t,
                                          std::memory_order_release,
                                          std::memory_order_relaxed));
    } while (!head_.compare_exchange_weak(
        oldHead, t, std::memory_order_release, std::memory_order_relaxed));

    return oldHead == nullptr;
  }

  /**
   * Replaces the head with nullptr,
   * and calls func() on the removed elements in the order from tail to head.
   * Returns false if the list was empty.
   */
  template <typename F>
  bool sweepOnce(F&& func) {
    if (auto head = head_.exchange(nullptr)) {
      auto rhead = reverse(head);
      unlinkAll(rhead, std::forward<F>(func));
      return true;
    }
    return false;
  }

  /**
   * Repeatedly replaces the head with nullptr,
   * and calls func() on the removed elements in the order from tail to head.
@ -102,17 +117,31 @@ class AtomicIntrusiveLinkedList {
   */
  template <typename F>
  void sweep(F&& func) {
    while (auto head = head_.exchange(nullptr)) {
      auto rhead = reverse(head);
      while (rhead != nullptr) {
        auto t = rhead;
        rhead = next(t);
        next(t) = nullptr;
        func(t);
      }
    while (sweepOnce(func)) {
    }
  }

  /**
   * Similar to sweep() but calls func() on elements in LIFO order.
   *
   * func() is called for all elements in the list at the moment
   * reverseSweep() is called. Unlike sweep() it does not loop to ensure the
   * list is empty at some point after the last invocation. This way callers
   * can reason about the ordering: elements inserted since the last call to
   * reverseSweep() will be provided in LIFO order.
   *
   * Example: if elements are inserted in the order 1-2-3, the callback is
   * invoked 3-2-1. If the callback moves elements onto a stack, popping off
   * the stack will produce the original insertion order 1-2-3.
   */
  template <typename F>
  void reverseSweep(F&& func) {
    // We don't loop like sweep() does because the overall order of callbacks
    // would be strand-wise LIFO which is meaningless to callers.
    auto head = head_.exchange(nullptr);
    unlinkAll(head, std::forward<F>(func));
  }

 private:
  std::atomic<T*> head_{nullptr};

@ -132,6 +161,18 @@ class AtomicIntrusiveLinkedList {
    }
    return rhead;
  }

  /* Unlinks all elements in the linked list fragment pointed to by `head',
   * calling func() on every element */
  template <typename F>
  void unlinkAll(T* head, F&& func) {
    while (head != nullptr) {
      auto t = head;
      head = next(t);
      next(t) = nullptr;
      func(t);
    }
  }
};

} // namespace folly
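The insertHead()/sweep()/reverseSweep() trio above implements a lock-free multi-producer, single-sweeper list over an embedded hook. A hedged sketch of typical use; the Task type and its field names are invented for illustration:

#include <folly/AtomicIntrusiveLinkedList.h>
#include <cstdio>

struct Task {
  explicit Task(int i) : id(i) {}
  int id;
  // The link lives inside the element itself, so insertHead() allocates nothing.
  folly::AtomicIntrusiveLinkedListHook<Task> hook;
};

int main() {
  folly::AtomicIntrusiveLinkedList<Task, &Task::hook> queue;

  Task a{1}, b{2}, c{3};
  queue.insertHead(&a); // returns true: the list was empty
  queue.insertHead(&b);
  queue.insertHead(&c);

  // sweep() reverses each drained batch, so callbacks run tail-to-head,
  // i.e. in insertion order: 1, 2, 3. reverseSweep() would give 3, 2, 1,
  // exactly as the docblock above describes.
  queue.sweep([](Task* t) { std::printf("task %d\n", t->id); });
  return 0;
}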
26 ios/Pods/Folly/folly/AtomicLinkedList.h (generated)
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2014-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -54,7 +54,7 @@ class AtomicLinkedList {
   * after the call.
   */
  bool insertHead(T t) {
    auto wrapper = folly::make_unique<Wrapper>(std::move(t));
    auto wrapper = std::make_unique<Wrapper>(std::move(t));

    return list_.insertHead(wrapper.release());
  }
@ -73,6 +73,28 @@ class AtomicLinkedList {
    });
  }

  /**
   * Similar to sweep() but calls func() on elements in LIFO order.
   *
   * func() is called for all elements in the list at the moment
   * reverseSweep() is called. Unlike sweep() it does not loop to ensure the
   * list is empty at some point after the last invocation. This way callers
   * can reason about the ordering: elements inserted since the last call to
   * reverseSweep() will be provided in LIFO order.
   *
   * Example: if elements are inserted in the order 1-2-3, the callback is
   * invoked 3-2-1. If the callback moves elements onto a stack, popping off
   * the stack will produce the original insertion order 1-2-3.
   */
  template <typename F>
  void reverseSweep(F&& func) {
    list_.reverseSweep([&](Wrapper* wrapperPtr) mutable {
      std::unique_ptr<Wrapper> wrapper(wrapperPtr);

      func(std::move(wrapper->data));
    });
  }

 private:
  struct Wrapper {
    explicit Wrapper(T&& t) : data(std::move(t)) {}
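AtomicLinkedList is the non-intrusive wrapper over the intrusive list: the make_unique change above is the spot where each element gets boxed into a heap-allocated Wrapper node. A small sketch, assuming only this Pod's headers; the element type is arbitrary:

#include <folly/AtomicLinkedList.h>
#include <cstdio>
#include <string>

int main() {
  folly::AtomicLinkedList<std::string> log;

  log.insertHead(std::string("first"));
  log.insertHead(std::string("second"));

  // The callback receives ownership of each element by rvalue; insertion
  // order is preserved, and the Wrapper nodes are reclaimed inside sweep().
  log.sweep([](std::string&& s) { std::printf("%s\n", s.c_str()); });
  return 0;
}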
139 ios/Pods/Folly/folly/AtomicStruct.h (generated)
@ -1,139 +0,0 @@
/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <atomic>
#include <type_traits>
#include <folly/Traits.h>
#include <string.h>
#include <stdint.h>

namespace folly {

namespace detail {
template <int N> struct AtomicStructIntPick {};
}

/// AtomicStruct<T> works like C++ atomics, but can be used on any POD
/// type <= 8 bytes.
template <
    typename T,
    template<typename> class Atom = std::atomic,
    typename Raw = typename detail::AtomicStructIntPick<sizeof(T)>::type>
class AtomicStruct {
  static_assert(alignof(T) <= alignof(Raw),
      "target type can't have stricter alignment than matching int");
  static_assert(sizeof(T) <= sizeof(Raw),
      "underlying type isn't big enough");
  static_assert(std::is_trivial<T>::value ||
                folly::IsTriviallyCopyable<T>::value,
      "target type must be trivially copyable");

  union {
    Atom<Raw> data;
    T typedData;
  };

  static Raw encode(T v) noexcept {
    // we expect the compiler to optimize away the memcpy, but without
    // it we would violate strict aliasing rules
    Raw d = 0;
    memcpy(&d, &v, sizeof(T));
    return d;
  }

  static T decode(Raw d) noexcept {
    T v;
    memcpy(&v, &d, sizeof(T));
    return v;
  }

 public:
  AtomicStruct() = default;
  ~AtomicStruct() = default;
  AtomicStruct(AtomicStruct<T> const &) = delete;
  AtomicStruct<T>& operator= (AtomicStruct<T> const &) = delete;

  constexpr /* implicit */ AtomicStruct(T v) noexcept : typedData(v) {}

  bool is_lock_free() const noexcept {
    return data.is_lock_free();
  }

  bool compare_exchange_strong(
      T& v0, T v1,
      std::memory_order mo = std::memory_order_seq_cst) noexcept {
    Raw d0 = encode(v0);
    bool rv = data.compare_exchange_strong(d0, encode(v1), mo);
    if (!rv) {
      v0 = decode(d0);
    }
    return rv;
  }

  bool compare_exchange_weak(
      T& v0, T v1,
      std::memory_order mo = std::memory_order_seq_cst) noexcept {
    Raw d0 = encode(v0);
    bool rv = data.compare_exchange_weak(d0, encode(v1), mo);
    if (!rv) {
      v0 = decode(d0);
    }
    return rv;
  }

  T exchange(T v, std::memory_order mo = std::memory_order_seq_cst) noexcept {
    return decode(data.exchange(encode(v), mo));
  }

  /* implicit */ operator T () const noexcept {
    return decode(data);
  }

  T load(std::memory_order mo = std::memory_order_seq_cst) const noexcept {
    return decode(data.load(mo));
  }

  T operator= (T v) noexcept {
    return decode(data = encode(v));
  }

  void store(T v, std::memory_order mo = std::memory_order_seq_cst) noexcept {
    data.store(encode(v), mo);
  }

  // std::atomic also provides volatile versions of all of the access
  // methods. These are callable on volatile objects, and also can
  // theoretically have different implementations than their non-volatile
  // counterpart. If someone wants them here they can easily be added
  // by duplicating the above code and the corresponding unit tests.
};

namespace detail {

template <> struct AtomicStructIntPick<1> { typedef uint8_t type; };
template <> struct AtomicStructIntPick<2> { typedef uint16_t type; };
template <> struct AtomicStructIntPick<3> { typedef uint32_t type; };
template <> struct AtomicStructIntPick<4> { typedef uint32_t type; };
template <> struct AtomicStructIntPick<5> { typedef uint64_t type; };
template <> struct AtomicStructIntPick<6> { typedef uint64_t type; };
template <> struct AtomicStructIntPick<7> { typedef uint64_t type; };
template <> struct AtomicStructIntPick<8> { typedef uint64_t type; };

} // namespace detail

} // namespace folly
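AtomicStruct.h is deleted outright by this upgrade (the -1,139 +0,0 hunk above), so the sketch below compiles only against the old Pod snapshot; TaggedIndex is an invented example type. It shows the point of the class: memcpy-encoding a small trivially copyable struct through a matching integer so the whole struct participates in one compare-exchange:

#include <folly/AtomicStruct.h> // removed by this upgrade; old snapshot only

#include <cassert>
#include <cstdint>

// Two 32-bit fields that must change together; sizeof == 8, so
// AtomicStructIntPick<8> selects uint64_t as the Raw carrier.
struct TaggedIndex {
  uint32_t index;
  uint32_t tag;
};

int main() {
  folly::AtomicStruct<TaggedIndex> slot(TaggedIndex{0, 0});

  TaggedIndex expected = slot.load();
  TaggedIndex desired{expected.index + 1, expected.tag + 1};

  // Both fields are swapped in a single atomic operation; on failure the
  // observed value is decoded back into `expected`, as in std::atomic.
  bool ok = slot.compare_exchange_strong(expected, desired);
  assert(ok);
  (void)ok;
  return 0;
}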
130 ios/Pods/Folly/folly/AtomicUnorderedMap.h (generated)
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2013-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -17,23 +17,23 @@
#pragma once

#include <atomic>
#include <cstdint>
#include <functional>
#include <limits>
#include <stdexcept>
#include <system_error>
#include <type_traits>
#include <stdint.h>

#include <folly/Bits.h>
#include <boost/type_traits/has_trivial_destructor.hpp>

#include <folly/Conv.h>
#include <folly/Likely.h>
#include <folly/Random.h>
#include <folly/detail/AtomicUnorderedMapUtils.h>
#include <folly/lang/Bits.h>
#include <folly/portability/SysMman.h>
#include <folly/portability/Unistd.h>

#include <boost/type_traits/has_trivial_destructor.hpp>
#include <limits>

namespace folly {

/// You're probably reading this because you are looking for an
@ -129,22 +129,22 @@ namespace folly {
/// which is much faster than destructing all of the keys and values.
/// Feel free to override if std::is_trivial_destructor isn't recognizing
/// the triviality of your destructors.
template <typename Key,
          typename Value,
          typename Hash = std::hash<Key>,
          typename KeyEqual = std::equal_to<Key>,
          bool SkipKeyValueDeletion =
              (boost::has_trivial_destructor<Key>::value &&
               boost::has_trivial_destructor<Value>::value),
          template<typename> class Atom = std::atomic,
          typename IndexType = uint32_t,
          typename Allocator = folly::detail::MMapAlloc>
template <
    typename Key,
    typename Value,
    typename Hash = std::hash<Key>,
    typename KeyEqual = std::equal_to<Key>,
    bool SkipKeyValueDeletion =
        (boost::has_trivial_destructor<Key>::value &&
         boost::has_trivial_destructor<Value>::value),
    template <typename> class Atom = std::atomic,
    typename IndexType = uint32_t,
    typename Allocator = folly::detail::MMapAlloc>

struct AtomicUnorderedInsertMap {

  typedef Key key_type;
  typedef Value mapped_type;
  typedef std::pair<Key,Value> value_type;
  typedef std::pair<Key, Value> value_type;
  typedef std::size_t size_type;
  typedef std::ptrdiff_t difference_type;
  typedef Hash hasher;
@ -153,23 +153,21 @@ struct AtomicUnorderedInsertMap {

  typedef struct ConstIterator {
    ConstIterator(const AtomicUnorderedInsertMap& owner, IndexType slot)
      : owner_(owner)
      , slot_(slot)
    {}
        : owner_(owner), slot_(slot) {}

    ConstIterator(const ConstIterator&) = default;
    ConstIterator& operator= (const ConstIterator&) = default;
    ConstIterator& operator=(const ConstIterator&) = default;

    const value_type& operator* () const {
    const value_type& operator*() const {
      return owner_.slots_[slot_].keyValue();
    }

    const value_type* operator-> () const {
    const value_type* operator->() const {
      return &owner_.slots_[slot_].keyValue();
    }

    // pre-increment
    const ConstIterator& operator++ () {
    const ConstIterator& operator++() {
      while (slot_ > 0) {
        --slot_;
        if (owner_.slots_[slot_].state() == LINKED) {
@ -186,10 +184,10 @@ struct AtomicUnorderedInsertMap {
      return prev;
    }

    bool operator== (const ConstIterator& rhs) const {
    bool operator==(const ConstIterator& rhs) const {
      return slot_ == rhs.slot_;
    }
    bool operator!= (const ConstIterator& rhs) const {
    bool operator!=(const ConstIterator& rhs) const {
      return !(*this == rhs);
    }

@ -210,9 +208,8 @@ struct AtomicUnorderedInsertMap {
      size_t maxSize,
      float maxLoadFactor = 0.8f,
      const Allocator& alloc = Allocator())
    : allocator_(alloc)
  {
    size_t capacity = maxSize / std::min(1.0f, maxLoadFactor) + 128;
      : allocator_(alloc) {
    size_t capacity = size_t(maxSize / std::min(1.0f, maxLoadFactor) + 128);
    size_t avail = size_t{1} << (8 * sizeof(IndexType) - 2);
    if (capacity > avail && maxSize < avail) {
      // we'll do our best
@ -262,8 +259,8 @@ struct AtomicUnorderedInsertMap {
  ///   auto value = memo.findOrConstruct(key, [=](void* raw) {
  ///     new (raw) std::string(computation(key));
  ///   })->first;
  template<typename Func>
  std::pair<const_iterator,bool> findOrConstruct(const Key& key, Func&& func) {
  template <typename Func>
  std::pair<const_iterator, bool> findOrConstruct(const Key& key, Func&& func) {
    auto const slot = keyToSlotIdx(key);
    auto prev = slots_[slot].headAndState_.load(std::memory_order_acquire);

@ -314,11 +311,10 @@ struct AtomicUnorderedInsertMap {
  /// Eventually we can duplicate all of the std::pair constructor
  /// forms, including a recursive tuple forwarding template
  /// http://functionalcpp.wordpress.com/2013/08/28/tuple-forwarding/).
  template<class K, class V>
  std::pair<const_iterator,bool> emplace(const K& key, V&& value) {
    return findOrConstruct(key, [&](void* raw) {
      new (raw) Value(std::forward<V>(value));
    });
  template <class K, class V>
  std::pair<const_iterator, bool> emplace(const K& key, V&& value) {
    return findOrConstruct(
        key, [&](void* raw) { new (raw) Value(std::forward<V>(value)); });
  }

  const_iterator find(const Key& key) const {
@ -338,8 +334,7 @@ struct AtomicUnorderedInsertMap {
  }

 private:

  enum {
  enum : IndexType {
    kMaxAllocationTries = 1000, // after this we throw
  };

@ -367,9 +362,8 @@ struct AtomicUnorderedInsertMap {
    IndexType next_;

    /// Key and Value
    typename std::aligned_storage<sizeof(value_type),
                                  alignof(value_type)>::type raw_;

    typename std::aligned_storage<sizeof(value_type), alignof(value_type)>::type
        raw_;

    ~Slot() {
      auto s = state();
@ -398,7 +392,6 @@ struct AtomicUnorderedInsertMap {
      assert(state() != EMPTY);
      return *static_cast<const value_type*>(static_cast<const void*>(&raw_));
    }

  };

  // We manually manage the slot memory so we can bypass initialization
@ -437,7 +430,7 @@ struct AtomicUnorderedInsertMap {
  /// Allocates a slot and returns its index. Tries to put it near
  /// slots_[start].
  IndexType allocateNear(IndexType start) {
    for (auto tries = 0; tries < kMaxAllocationTries; ++tries) {
    for (IndexType tries = 0; tries < kMaxAllocationTries; ++tries) {
      auto slot = allocationAttempt(start, tries);
      auto prev = slots_[slot].headAndState_.load(std::memory_order_acquire);
      if ((prev & 3) == EMPTY &&
@ -454,13 +447,13 @@ struct AtomicUnorderedInsertMap {
  /// can specialize it differently during deterministic testing
  IndexType allocationAttempt(IndexType start, IndexType tries) const {
    if (LIKELY(tries < 8 && start + tries < numSlots_)) {
      return start + tries;
      return IndexType(start + tries);
    } else {
      IndexType rv;
      if (sizeof(IndexType) <= 4) {
        rv = folly::Random::rand32(numSlots_);
        rv = IndexType(folly::Random::rand32(numSlots_));
      } else {
        rv = folly::Random::rand64(numSlots_);
        rv = IndexType(folly::Random::rand64(numSlots_));
      }
      assert(rv < numSlots_);
      return rv;
@ -479,31 +472,31 @@ struct AtomicUnorderedInsertMap {
/// to select a 64 bit slot index type. Use this if you need a capacity
/// bigger than 2^30 (about a billion). This increases memory overheads,
/// obviously.
template <typename Key,
          typename Value,
          typename Hash = std::hash<Key>,
          typename KeyEqual = std::equal_to<Key>,
          bool SkipKeyValueDeletion =
              (boost::has_trivial_destructor<Key>::value &&
               boost::has_trivial_destructor<Value>::value),
          template <typename> class Atom = std::atomic,
          typename Allocator = folly::detail::MMapAlloc>
using AtomicUnorderedInsertMap64 =
    AtomicUnorderedInsertMap<Key,
                             Value,
                             Hash,
                             KeyEqual,
                             SkipKeyValueDeletion,
                             Atom,
                             uint64_t,
                             Allocator>;
template <
    typename Key,
    typename Value,
    typename Hash = std::hash<Key>,
    typename KeyEqual = std::equal_to<Key>,
    bool SkipKeyValueDeletion =
        (boost::has_trivial_destructor<Key>::value &&
         boost::has_trivial_destructor<Value>::value),
    template <typename> class Atom = std::atomic,
    typename Allocator = folly::detail::MMapAlloc>
using AtomicUnorderedInsertMap64 = AtomicUnorderedInsertMap<
    Key,
    Value,
    Hash,
    KeyEqual,
    SkipKeyValueDeletion,
    Atom,
    uint64_t,
    Allocator>;

/// MutableAtom is a tiny wrapper that gives you the option of atomically
/// updating values inserted into an AtomicUnorderedInsertMap<K,
/// MutableAtom<V>>. This relies on AtomicUnorderedInsertMap's guarantee
/// that it doesn't move values.
template <typename T,
          template<typename> class Atom = std::atomic>
template <typename T, template <typename> class Atom = std::atomic>
struct MutableAtom {
  mutable Atom<T> data;

@ -519,5 +512,4 @@ struct MutableData {
  explicit MutableData(const T& init) : data(init) {}
};


}
} // namespace folly
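AtomicUnorderedInsertMap trades mutation for speed: fixed capacity, insert-only, and values never move, which is what lets find() run without locks. A hedged sketch of findOrConstruct()/emplace() as documented above; the capacity and keys are arbitrary:

#include <folly/AtomicUnorderedMap.h>

#include <cstdio>
#include <new>
#include <string>

int main() {
  // maxSize is fixed at construction; there is no erase and no rehash.
  folly::AtomicUnorderedInsertMap<int, std::string> memo(1000);

  // findOrConstruct() placement-news the value only if the key is absent.
  auto result = memo.findOrConstruct(
      42, [](void* raw) { new (raw) std::string("forty-two"); });
  std::printf("inserted=%d value=%s\n",
              int(result.second),
              result.first->second.c_str());

  // emplace() from the diff above is sugar over the same call.
  memo.emplace(7, std::string("seven"));

  auto it = memo.find(7);
  if (it != memo.cend()) {
    std::printf("7 -> %s\n", it->second.c_str());
  }
  return 0;
}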
298 ios/Pods/Folly/folly/Baton.h (generated)
@ -1,298 +0,0 @@
/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <stdint.h>
#include <atomic>
#include <errno.h>
#include <assert.h>

#include <folly/detail/Futex.h>
#include <folly/detail/MemoryIdler.h>
#include <folly/portability/Asm.h>

namespace folly {

/// A Baton allows a thread to block once and be awoken: it captures
/// a single handoff. During its lifecycle (from construction/reset to
/// destruction/reset) a baton must either be post()ed and wait()ed exactly
/// once each, or not at all.
///
/// Baton includes no internal padding, and is only 4 bytes in size.
/// Any alignment or padding to avoid false sharing is up to the user.
///
/// This is basically a stripped-down semaphore that supports only a
/// single call to sem_post and a single call to sem_wait. The current
/// posix semaphore sem_t isn't too bad, but this provides a bit more
/// speed, inlining, smaller size, a guarantee that the implementation
/// won't change, and compatibility with DeterministicSchedule. By having
/// a much more restrictive lifecycle we can also add a bunch of assertions
/// that can help to catch race conditions ahead of time.
template <template<typename> class Atom = std::atomic>
struct Baton {
  constexpr Baton() : state_(INIT) {}

  Baton(Baton const&) = delete;
  Baton& operator=(Baton const&) = delete;

  /// It is an error to destroy a Baton on which a thread is currently
  /// wait()ing. In practice this means that the waiter usually takes
  /// responsibility for destroying the Baton.
  ~Baton() {
    // The docblock for this function says that it can't be called when
    // there is a concurrent waiter. We assume a strong version of this
    // requirement in which the caller must _know_ that this is true, they
    // are not allowed to be merely lucky. If two threads are involved,
    // the destroying thread must actually have synchronized with the
    // waiting thread after wait() returned. To convey causality the
    // waiting thread must have used release semantics and the destroying
    // thread must have used acquire semantics for that communication,
    // so we are guaranteed to see the post-wait() value of state_,
    // which cannot be WAITING.
    //
    // Note that since we only care about a single memory location,
    // the only two plausible memory orders here are relaxed and seq_cst.
    assert(state_.load(std::memory_order_relaxed) != WAITING);
  }

  /// Equivalent to destroying the Baton and creating a new one. It is
  /// a bug to call this while there is a waiting thread, so in practice
  /// the waiter will be the one that resets the baton.
  void reset() {
    // See ~Baton for a discussion about why relaxed is okay here
    assert(state_.load(std::memory_order_relaxed) != WAITING);

    // We use a similar argument to justify the use of a relaxed store
    // here. Since both wait() and post() are required to be called
    // only once per lifetime, no thread can actually call those methods
    // correctly after a reset() unless it synchronizes with the thread
    // that performed the reset(). If a post() or wait() on another thread
    // didn't synchronize, then regardless of what operation we performed
    // here there would be a race on proper use of the Baton's spec
    // (although not on any particular load and store). Put another way,
    // we don't need to synchronize here because anybody that might rely
    // on such synchronization is required by the baton rules to perform
    // an additional synchronization that has the desired effect anyway.
    //
    // There is actually a similar argument to be made about the
    // constructor, in which the fenceless constructor initialization
    // of state_ is piggybacked on whatever synchronization mechanism
    // distributes knowledge of the Baton's existence
    state_.store(INIT, std::memory_order_relaxed);
  }

  /// Causes wait() to wake up. For each lifetime of a Baton (where a
  /// lifetime starts at construction or reset() and ends at destruction
  /// or reset()) there can be at most one call to post(). Any thread
  /// may call post().
  ///
  /// Although we could implement a more generic semaphore semantics
  /// without any extra size or CPU overhead, the single-call limitation
  /// allows us to have better assertions during debug builds.
  void post() {
    uint32_t before = state_.load(std::memory_order_acquire);

    assert(before == INIT || before == WAITING || before == TIMED_OUT);

    if (before == INIT &&
        state_.compare_exchange_strong(before, EARLY_DELIVERY)) {
      return;
    }

    assert(before == WAITING || before == TIMED_OUT);

    if (before == TIMED_OUT) {
      return;
    }

    assert(before == WAITING);
    state_.store(LATE_DELIVERY, std::memory_order_release);
    state_.futexWake(1);
  }

  /// Waits until post() has been called in the current Baton lifetime.
  /// May be called at most once during a Baton lifetime (construction
  /// |reset until destruction|reset). If post is called before wait in
  /// the current lifetime then this method returns immediately.
  ///
  /// The restriction that there can be at most one wait() per lifetime
  /// could be relaxed somewhat without any perf or size regressions,
  /// but by making this condition very restrictive we can provide better
  /// checking in debug builds.
  void wait() {
    if (spinWaitForEarlyDelivery()) {
      assert(state_.load(std::memory_order_acquire) == EARLY_DELIVERY);
      return;
    }

    // guess we have to block :(
    uint32_t expected = INIT;
    if (!state_.compare_exchange_strong(expected, WAITING)) {
      // CAS failed, last minute reprieve
      assert(expected == EARLY_DELIVERY);
      return;
    }

    while (true) {
      detail::MemoryIdler::futexWait(state_, WAITING);

      // state_ is the truth even if FUTEX_WAIT reported a matching
      // FUTEX_WAKE, since we aren't using type-stable storage and we
      // don't guarantee reuse. The scenario goes like this: thread
      // A's last touch of a Baton is a call to wake(), which stores
      // LATE_DELIVERY and gets an unlucky context switch before delivering
      // the corresponding futexWake. Thread B sees LATE_DELIVERY
      // without consuming a futex event, because it calls futexWait
      // with an expected value of WAITING and hence doesn't go to sleep.
      // B returns, so the Baton's memory is reused and becomes another
      // Baton (or a reuse of this one). B calls futexWait on the new
      // Baton lifetime, then A wakes up and delivers a spurious futexWake
      // to the same memory location. B's futexWait will then report a
      // consumed wake event even though state_ is still WAITING.
      //
      // It would be possible to add an extra state_ dance to communicate
      // that the futexWake has been sent so that we can be sure to consume
      // it before returning, but that would be a perf and complexity hit.
      uint32_t s = state_.load(std::memory_order_acquire);
      assert(s == WAITING || s == LATE_DELIVERY);

      if (s == LATE_DELIVERY) {
        return;
      }
      // retry
    }
  }

  /// Similar to wait, but with a timeout. The thread is unblocked if the
  /// timeout expires.
  /// Note: Only a single call to timed_wait/wait is allowed during a baton's
  /// life-cycle (from construction/reset to destruction/reset). In other
  /// words, after timed_wait the caller can't invoke wait/timed_wait/try_wait
  /// again on the same baton without resetting it.
  ///
  /// @param  deadline      Time until which the thread can block
  /// @return               true if the baton was posted to before timeout,
  ///                       false otherwise
  template <typename Clock, typename Duration = typename Clock::duration>
  bool timed_wait(const std::chrono::time_point<Clock,Duration>& deadline) {
    if (spinWaitForEarlyDelivery()) {
      assert(state_.load(std::memory_order_acquire) == EARLY_DELIVERY);
      return true;
    }

    // guess we have to block :(
    uint32_t expected = INIT;
    if (!state_.compare_exchange_strong(expected, WAITING)) {
      // CAS failed, last minute reprieve
      assert(expected == EARLY_DELIVERY);
      return true;
    }

    while (true) {
      auto rv = state_.futexWaitUntil(WAITING, deadline);
      if (rv == folly::detail::FutexResult::TIMEDOUT) {
        state_.store(TIMED_OUT, std::memory_order_release);
        return false;
      }

      uint32_t s = state_.load(std::memory_order_acquire);
      assert(s == WAITING || s == LATE_DELIVERY);
      if (s == LATE_DELIVERY) {
        return true;
      }
    }
  }

  /// Similar to timed_wait, but with a duration.
  template <typename Clock = std::chrono::steady_clock, typename Duration>
  bool timed_wait(const Duration& duration) {
    auto deadline = Clock::now() + duration;
    return timed_wait(deadline);
  }

  /// Similar to wait, but doesn't block the thread if it hasn't been posted.
  ///
  /// try_wait has the following semantics:
  /// - It is ok to call try_wait any number times on the same baton until
  ///   try_wait reports that the baton has been posted.
  /// - It is ok to call timed_wait or wait on the same baton if try_wait
  ///   reports that baton hasn't been posted.
  /// - If try_wait indicates that the baton has been posted, it is invalid to
  ///   call wait, try_wait or timed_wait on the same baton without resetting
  ///
  /// @return       true if baton has been posted, false otherwise
  bool try_wait() {
    auto s = state_.load(std::memory_order_acquire);
    assert(s == INIT || s == EARLY_DELIVERY);
    return s == EARLY_DELIVERY;
  }

 private:
  enum State : uint32_t {
    INIT = 0,
    EARLY_DELIVERY = 1,
    WAITING = 2,
    LATE_DELIVERY = 3,
    TIMED_OUT = 4
  };

  enum {
    // Must be positive. If multiple threads are actively using a
    // higher-level data structure that uses batons internally, it is
    // likely that the post() and wait() calls happen almost at the same
    // time. In this state, we lose big 50% of the time if the wait goes
    // to sleep immediately. On circa-2013 devbox hardware it costs about
    // 7 usec to FUTEX_WAIT and then be awoken (half the t/iter as the
    // posix_sem_pingpong test in BatonTests). We can improve our chances
    // of EARLY_DELIVERY by spinning for a bit, although we have to balance
    // this against the loss if we end up sleeping any way. Spins on this
    // hw take about 7 nanos (all but 0.5 nanos is the pause instruction).
    // We give ourself 300 spins, which is about 2 usec of waiting. As a
    // partial consolation, since we are using the pause instruction we
    // are giving a speed boost to the colocated hyperthread.
    PreBlockAttempts = 300,
  };

  // Spin for "some time" (see discussion on PreBlockAttempts) waiting
  // for a post.
  //
  // @return       true if we received an early delivery during the wait,
  //               false otherwise. If the function returns true then
  //               state_ is guaranteed to be EARLY_DELIVERY
  bool spinWaitForEarlyDelivery() {

    static_assert(PreBlockAttempts > 0,
        "isn't this assert clearer than an uninitialized variable warning?");
    for (int i = 0; i < PreBlockAttempts; ++i) {
      if (try_wait()) {
        // hooray!
        return true;
      }
      // The pause instruction is the polite way to spin, but it doesn't
      // actually affect correctness to omit it if we don't have it.
      // Pausing donates the full capabilities of the current core to
      // its other hyperthreads for a dozen cycles or so
      asm_volatile_pause();
    }

    return false;
  }

  detail::Futex<Atom> state_;
};

} // namespace folly
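Baton.h is also removed from this Pod snapshot (upstream folly later ships it as folly/synchronization/Baton.h). Against the old snapshot, the single-handoff protocol it documents looks like this; the payload variable is illustrative:

#include <folly/Baton.h> // path valid only in the old Pod snapshot

#include <cstdio>
#include <thread>

int main() {
  folly::Baton<> handoff;
  int payload = 0;

  std::thread consumer([&] {
    // Spins up to PreBlockAttempts (300) times hoping for EARLY_DELIVERY,
    // then futex-waits. At most one wait() per baton lifetime.
    handoff.wait();
    std::printf("payload = %d\n", payload);
  });

  payload = 42;   // made visible to the waiter by post()'s release store
  handoff.post(); // at most one post() per baton lifetime

  consumer.join();
  return 0;
}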
367 ios/Pods/Folly/folly/Benchmark.h (generated)
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2016 Facebook, Inc.
|
||||
* Copyright 2012-present Facebook, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@ -20,17 +20,18 @@
|
||||
#include <folly/Preprocessor.h> // for FB_ANONYMOUS_VARIABLE
|
||||
#include <folly/ScopeGuard.h>
|
||||
#include <folly/Traits.h>
|
||||
#include <folly/functional/Invoke.h>
|
||||
#include <folly/portability/GFlags.h>
|
||||
#include <folly/portability/Time.h>
|
||||
|
||||
#include <cassert>
|
||||
#include <ctime>
|
||||
#include <boost/function_types/function_arity.hpp>
|
||||
#include <chrono>
|
||||
#include <functional>
|
||||
#include <glog/logging.h>
|
||||
#include <limits>
|
||||
#include <type_traits>
|
||||
|
||||
#include <boost/function_types/function_arity.hpp>
|
||||
#include <glog/logging.h>
|
||||
|
||||
DECLARE_bool(benchmark);
|
||||
|
||||
namespace folly {
|
||||
@ -53,55 +54,30 @@ inline bool runBenchmarksOnFlag() {
|
||||
|
||||
namespace detail {
|
||||
|
||||
typedef std::pair<uint64_t, unsigned int> TimeIterPair;
|
||||
using TimeIterPair =
|
||||
std::pair<std::chrono::high_resolution_clock::duration, unsigned int>;
|
||||
using BenchmarkFun = std::function<detail::TimeIterPair(unsigned int)>;
|
||||
|
||||
struct BenchmarkRegistration {
|
||||
std::string file;
|
||||
std::string name;
|
||||
BenchmarkFun func;
|
||||
};
|
||||
|
||||
struct BenchmarkResult {
|
||||
std::string file;
|
||||
std::string name;
|
||||
double timeInNs;
|
||||
};
|
||||
|
||||
/**
|
||||
* Adds a benchmark wrapped in a std::function. Only used
|
||||
* internally. Pass by value is intentional.
|
||||
*/
|
||||
void addBenchmarkImpl(const char* file,
|
||||
const char* name,
|
||||
std::function<TimeIterPair(unsigned int)>);
|
||||
|
||||
/**
|
||||
* Takes the difference between two timespec values. end is assumed to
|
||||
* occur after start.
|
||||
*/
|
||||
inline uint64_t timespecDiff(timespec end, timespec start) {
|
||||
if (end.tv_sec == start.tv_sec) {
|
||||
assert(end.tv_nsec >= start.tv_nsec);
|
||||
return end.tv_nsec - start.tv_nsec;
|
||||
}
|
||||
assert(end.tv_sec > start.tv_sec);
|
||||
auto diff = uint64_t(end.tv_sec - start.tv_sec);
|
||||
assert(diff <
|
||||
std::numeric_limits<uint64_t>::max() / 1000000000UL);
|
||||
return diff * 1000000000UL
|
||||
+ end.tv_nsec - start.tv_nsec;
|
||||
}
|
||||
|
||||
/**
|
||||
* Takes the difference between two sets of timespec values. The first
|
||||
* two come from a high-resolution clock whereas the other two come
|
||||
* from a low-resolution clock. The crux of the matter is that
|
||||
* high-res values may be bogus as documented in
|
||||
* http://linux.die.net/man/3/clock_gettime. The trouble is when the
|
||||
* running process migrates from one CPU to another, which is more
|
||||
* likely for long-running processes. Therefore we watch for high
|
||||
* differences between the two timings.
|
||||
*
|
||||
* This function is subject to further improvements.
|
||||
*/
|
||||
inline uint64_t timespecDiff(timespec end, timespec start,
|
||||
timespec endCoarse, timespec startCoarse) {
|
||||
auto fine = timespecDiff(end, start);
|
||||
auto coarse = timespecDiff(endCoarse, startCoarse);
|
||||
if (coarse - fine >= 1000000) {
|
||||
// The fine time is in all likelihood bogus
|
||||
return coarse;
|
||||
}
|
||||
return fine;
|
||||
}
|
||||
void addBenchmarkImpl(
|
||||
const char* file,
|
||||
const char* name,
|
||||
std::function<TimeIterPair(unsigned int)>);
|
||||
|
||||
} // namespace detail
|
||||
|
||||
@ -109,46 +85,52 @@ inline uint64_t timespecDiff(timespec end, timespec start,
|
||||
* Supporting type for BENCHMARK_SUSPEND defined below.
|
||||
*/
|
||||
struct BenchmarkSuspender {
|
||||
using Clock = std::chrono::high_resolution_clock;
|
||||
using TimePoint = Clock::time_point;
|
||||
using Duration = Clock::duration;
|
||||
|
||||
BenchmarkSuspender() {
|
||||
CHECK_EQ(0, clock_gettime(CLOCK_REALTIME, &start));
|
||||
start = Clock::now();
|
||||
}
|
||||
|
||||
BenchmarkSuspender(const BenchmarkSuspender &) = delete;
|
||||
BenchmarkSuspender(BenchmarkSuspender && rhs) noexcept {
|
||||
BenchmarkSuspender(const BenchmarkSuspender&) = delete;
|
||||
BenchmarkSuspender(BenchmarkSuspender&& rhs) noexcept {
|
||||
start = rhs.start;
|
||||
rhs.start.tv_nsec = rhs.start.tv_sec = 0;
|
||||
rhs.start = {};
|
||||
}
|
||||
|
||||
BenchmarkSuspender& operator=(const BenchmarkSuspender &) = delete;
|
||||
BenchmarkSuspender& operator=(BenchmarkSuspender && rhs) {
|
||||
if (start.tv_nsec > 0 || start.tv_sec > 0) {
|
||||
BenchmarkSuspender& operator=(const BenchmarkSuspender&) = delete;
|
||||
BenchmarkSuspender& operator=(BenchmarkSuspender&& rhs) {
|
||||
if (start != TimePoint{}) {
|
||||
tally();
|
||||
}
|
||||
start = rhs.start;
|
||||
rhs.start.tv_nsec = rhs.start.tv_sec = 0;
|
||||
rhs.start = {};
|
||||
return *this;
|
||||
}
|
||||
|
||||
~BenchmarkSuspender() {
|
||||
if (start.tv_nsec > 0 || start.tv_sec > 0) {
|
||||
if (start != TimePoint{}) {
|
||||
tally();
|
||||
}
|
||||
}
|
||||
|
||||
void dismiss() {
|
||||
assert(start.tv_nsec > 0 || start.tv_sec > 0);
|
||||
assert(start != TimePoint{});
|
||||
tally();
|
||||
start.tv_nsec = start.tv_sec = 0;
|
||||
start = {};
|
||||
}
|
||||
|
||||
void rehire() {
|
||||
assert(start.tv_nsec == 0 || start.tv_sec == 0);
|
||||
CHECK_EQ(0, clock_gettime(CLOCK_REALTIME, &start));
|
||||
assert(start == TimePoint{});
|
||||
start = Clock::now();
|
||||
}
|
||||
|
||||
template <class F>
|
||||
auto dismissing(F f) -> typename std::result_of<F()>::type {
|
||||
SCOPE_EXIT { rehire(); };
|
||||
auto dismissing(F f) -> invoke_result_t<F> {
|
||||
SCOPE_EXIT {
|
||||
rehire();
|
||||
};
|
||||
dismiss();
|
||||
return f();
|
||||
}
|
||||
@ -162,20 +144,18 @@ struct BenchmarkSuspender {
|
||||
}
|
||||
|
||||
/**
|
||||
* Accumulates nanoseconds spent outside benchmark.
|
||||
* Accumulates time spent outside benchmark.
|
||||
*/
|
||||
typedef uint64_t NanosecondsSpent;
|
||||
static NanosecondsSpent nsSpent;
|
||||
static Duration timeSpent;
|
||||
|
||||
private:
|
||||
private:
|
||||
void tally() {
|
||||
timespec end;
|
||||
CHECK_EQ(0, clock_gettime(CLOCK_REALTIME, &end));
|
||||
nsSpent += detail::timespecDiff(end, start);
|
||||
auto end = Clock::now();
|
||||
timeSpent += end - start;
|
||||
start = end;
|
||||
}
|
||||
|
||||
timespec start;
|
||||
TimePoint start;
|
||||
};
|
||||
|
||||
/**
|
||||
@ -187,31 +167,25 @@ private:
|
||||
*/
|
||||
template <typename Lambda>
|
||||
typename std::enable_if<
|
||||
boost::function_types::function_arity<decltype(&Lambda::operator())>::value
|
||||
== 2
|
||||
>::type
|
||||
boost::function_types::function_arity<
|
||||
decltype(&Lambda::operator())>::value == 2>::type
|
||||
addBenchmark(const char* file, const char* name, Lambda&& lambda) {
|
||||
auto execute = [=](unsigned int times) {
|
||||
BenchmarkSuspender::nsSpent = 0;
|
||||
timespec start, end;
|
||||
BenchmarkSuspender::timeSpent = {};
|
||||
unsigned int niter;
|
||||
|
||||
// CORE MEASUREMENT STARTS
|
||||
auto const r1 = clock_gettime(CLOCK_REALTIME, &start);
|
||||
auto start = std::chrono::high_resolution_clock::now();
|
||||
niter = lambda(times);
|
||||
auto const r2 = clock_gettime(CLOCK_REALTIME, &end);
|
||||
auto end = std::chrono::high_resolution_clock::now();
|
||||
// CORE MEASUREMENT ENDS
|
||||
|
||||
CHECK_EQ(0, r1);
|
||||
CHECK_EQ(0, r2);
|
||||
|
||||
return detail::TimeIterPair(
|
||||
detail::timespecDiff(end, start) - BenchmarkSuspender::nsSpent,
|
||||
niter);
|
||||
(end - start) - BenchmarkSuspender::timeSpent, niter);
|
||||
};
|
||||
|
||||
detail::addBenchmarkImpl(file, name,
|
||||
std::function<detail::TimeIterPair(unsigned int)>(execute));
|
||||
detail::addBenchmarkImpl(
|
||||
file, name, std::function<detail::TimeIterPair(unsigned int)>(execute));
|
||||
}
|
||||
|
||||
/**
|
||||
@ -222,17 +196,16 @@ addBenchmark(const char* file, const char* name, Lambda&& lambda) {
|
||||
*/
|
||||
template <typename Lambda>
|
||||
typename std::enable_if<
|
||||
boost::function_types::function_arity<decltype(&Lambda::operator())>::value
|
||||
== 1
|
||||
>::type
|
||||
boost::function_types::function_arity<
|
||||
decltype(&Lambda::operator())>::value == 1>::type
|
||||
addBenchmark(const char* file, const char* name, Lambda&& lambda) {
|
||||
addBenchmark(file, name, [=](unsigned int times) {
|
||||
unsigned int niter = 0;
|
||||
while (times-- > 0) {
|
||||
niter += lambda();
|
||||
}
|
||||
return niter;
|
||||
});
|
||||
unsigned int niter = 0;
|
||||
while (times-- > 0) {
|
||||
niter += lambda();
|
||||
}
|
||||
return niter;
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
@ -276,20 +249,35 @@ struct DoNotOptimizeAwayNeedsIndirect {
|
||||
// First two constraints ensure it can be an "r" operand.
|
||||
// std::is_pointer check is because callers seem to expect that
|
||||
// doNotOptimizeAway(&x) is equivalent to doNotOptimizeAway(x).
|
||||
constexpr static bool value = !folly::IsTriviallyCopyable<Decayed>::value ||
|
||||
constexpr static bool value = !folly::is_trivially_copyable<Decayed>::value ||
|
||||
sizeof(Decayed) > sizeof(long) || std::is_pointer<Decayed>::value;
|
||||
};
|
||||
} // detail namespace
|
||||
} // namespace detail
|
||||
|
||||
template <typename T>
|
||||
auto doNotOptimizeAway(const T& datum) -> typename std::enable_if<
|
||||
!detail::DoNotOptimizeAwayNeedsIndirect<T>::value>::type {
|
||||
asm volatile("" ::"X"(datum));
|
||||
// The "r" constraint forces the compiler to make datum available
|
||||
// in a register to the asm block, which means that it must have
|
||||
// computed/loaded it. We use this path for things that are <=
|
||||
// sizeof(long) (they have to fit), trivial (otherwise the compiler
|
||||
// doesn't want to put them in a register), and not a pointer (because
|
||||
// doNotOptimizeAway(&foo) would otherwise be a foot gun that didn't
|
||||
// necessarily compute foo).
|
||||
//
|
||||
// An earlier version of this method had a more permissive input operand
|
||||
// constraint, but that caused unnecessary variation between clang and
|
||||
// gcc benchmarks.
|
||||
asm volatile("" ::"r"(datum));
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
auto doNotOptimizeAway(const T& datum) -> typename std::enable_if<
|
||||
detail::DoNotOptimizeAwayNeedsIndirect<T>::value>::type {
|
||||
// This version of doNotOptimizeAway tells the compiler that the asm
|
||||
// block will read datum from memory, and that in addition it might read
|
||||
// or write from any memory location. If the memory clobber could be
|
||||
// separated into input and output that would be preferrable.
|
||||
asm volatile("" ::"m"(datum) : "memory");
|
||||
}
|
||||
|
||||
@ -307,19 +295,37 @@ auto makeUnpredictable(T& datum) -> typename std::enable_if<
|
||||
|
||||
#endif
|
||||
|
||||
struct dynamic;
|
||||
|
||||
void benchmarkResultsToDynamic(
|
||||
const std::vector<detail::BenchmarkResult>& data,
|
||||
dynamic&);
|
||||
|
||||
void benchmarkResultsFromDynamic(
|
||||
const dynamic&,
|
||||
std::vector<detail::BenchmarkResult>&);
|
||||
|
||||
void printResultComparison(
|
||||
const std::vector<detail::BenchmarkResult>& base,
|
||||
const std::vector<detail::BenchmarkResult>& test);
|
||||
|
||||
} // namespace folly
|
||||
|
||||
/**
|
||||
* Introduces a benchmark function. Used internally, see BENCHMARK and
|
||||
* friends below.
|
||||
*/
|
||||
#define BENCHMARK_IMPL(funName, stringName, rv, paramType, paramName) \
|
||||
static void funName(paramType); \
|
||||
static bool FB_ANONYMOUS_VARIABLE(follyBenchmarkUnused) = ( \
|
||||
::folly::addBenchmark(__FILE__, stringName, \
|
||||
[](paramType paramName) -> unsigned { funName(paramName); \
|
||||
return rv; }), \
|
||||
true); \
|
||||
#define BENCHMARK_IMPL(funName, stringName, rv, paramType, paramName) \
|
||||
static void funName(paramType); \
|
||||
static bool FB_ANONYMOUS_VARIABLE(follyBenchmarkUnused) = \
|
||||
(::folly::addBenchmark( \
|
||||
__FILE__, \
|
||||
stringName, \
|
||||
[](paramType paramName) -> unsigned { \
|
||||
funName(paramName); \
|
||||
return rv; \
|
||||
}), \
|
||||
true); \
|
||||
static void funName(paramType paramName)
|
||||
|
||||
/**
|
||||
@ -329,10 +335,12 @@ auto makeUnpredictable(T& datum) -> typename std::enable_if<
|
||||
*/
|
||||
#define BENCHMARK_MULTI_IMPL(funName, stringName, paramType, paramName) \
|
||||
static unsigned funName(paramType); \
|
||||
static bool FB_ANONYMOUS_VARIABLE(follyBenchmarkUnused) = ( \
|
||||
::folly::addBenchmark(__FILE__, stringName, \
|
||||
[](paramType paramName) { return funName(paramName); }), \
|
||||
true); \
|
||||
static bool FB_ANONYMOUS_VARIABLE(follyBenchmarkUnused) = \
|
||||
(::folly::addBenchmark( \
|
||||
__FILE__, \
|
||||
stringName, \
|
||||
[](paramType paramName) { return funName(paramName); }), \
|
||||
true); \
|
||||
static unsigned funName(paramType paramName)
|
||||
|
||||
/**
|
||||
@ -354,13 +362,13 @@ auto makeUnpredictable(T& datum) -> typename std::enable_if<
|
||||
* }
|
||||
* }
|
||||
*/
|
||||
#define BENCHMARK(name, ...) \
|
||||
BENCHMARK_IMPL( \
|
||||
name, \
|
||||
FB_STRINGIZE(name), \
|
||||
FB_ARG_2_OR_1(1, ## __VA_ARGS__), \
|
||||
FB_ONE_OR_NONE(unsigned, ## __VA_ARGS__), \
|
||||
__VA_ARGS__)
|
||||
#define BENCHMARK(name, ...) \
|
||||
BENCHMARK_IMPL( \
|
||||
name, \
|
||||
FB_STRINGIZE(name), \
|
||||
FB_ARG_2_OR_1(1, ##__VA_ARGS__), \
|
||||
FB_ONE_OR_NONE(unsigned, ##__VA_ARGS__), \
|
||||
__VA_ARGS__)
|
||||
|
||||
/**
|
||||
* Like BENCHMARK above, but allows the user to return the actual
|
||||
@ -377,19 +385,19 @@ auto makeUnpredictable(T& datum) -> typename std::enable_if<
|
||||
* return testCases.size();
|
||||
* }
|
||||
*/
|
||||
#define BENCHMARK_MULTI(name, ...) \
|
||||
BENCHMARK_MULTI_IMPL( \
|
||||
name, \
|
||||
FB_STRINGIZE(name), \
|
||||
FB_ONE_OR_NONE(unsigned, ## __VA_ARGS__), \
|
||||
__VA_ARGS__)
|
||||
#define BENCHMARK_MULTI(name, ...) \
|
||||
BENCHMARK_MULTI_IMPL( \
|
||||
name, \
|
||||
FB_STRINGIZE(name), \
|
||||
FB_ONE_OR_NONE(unsigned, ##__VA_ARGS__), \
|
||||
__VA_ARGS__)
|
||||
|
||||
/**
|
||||
* Defines a benchmark that passes a parameter to another one. This is
|
||||
* common for benchmarks that need a "problem size" in addition to
|
||||
* "number of iterations". Consider:
|
||||
*
|
||||
* void pushBack(uint n, size_t initialSize) {
|
||||
* void pushBack(uint32_t n, size_t initialSize) {
|
||||
* vector<int> v;
|
||||
* BENCHMARK_SUSPEND {
|
||||
* v.resize(initialSize);
|
||||
@ -406,14 +414,13 @@ auto makeUnpredictable(T& datum) -> typename std::enable_if<
|
||||
* initial sizes of the vector. The framework will pass 0, 1000, and
|
||||
* 1000000 for initialSize, and the iteration count for n.
|
||||
*/
|
||||
#define BENCHMARK_PARAM(name, param) \
|
||||
BENCHMARK_NAMED_PARAM(name, param, param)
|
||||
#define BENCHMARK_PARAM(name, param) BENCHMARK_NAMED_PARAM(name, param, param)
|
||||
|
||||
/**
|
||||
* Same as BENCHMARK_PARAM, but allows one to return the actual number of
|
||||
* iterations that have been run.
|
||||
*/
|
||||
#define BENCHMARK_PARAM_MULTI(name, param) \
|
||||
#define BENCHMARK_PARAM_MULTI(name, param) \
|
||||
BENCHMARK_NAMED_PARAM_MULTI(name, param, param)
|
||||
|
||||
/*
|
||||
@ -425,7 +432,7 @@ auto makeUnpredictable(T& datum) -> typename std::enable_if<
|
||||
*
|
||||
* For example:
|
||||
*
|
||||
* void addValue(uint n, int64_t bucketSize, int64_t min, int64_t max) {
|
||||
* void addValue(uint32_t n, int64_t bucketSize, int64_t min, int64_t max) {
|
||||
* Histogram<int64_t> hist(bucketSize, min, max);
|
||||
* int64_t num = min;
|
||||
* FOR_EACH_RANGE (i, 0, n) {
|
||||
@ -439,27 +446,27 @@ auto makeUnpredictable(T& datum) -> typename std::enable_if<
|
||||
* BENCHMARK_NAMED_PARAM(addValue, 0_to_1000, 10, 0, 1000)
|
||||
* BENCHMARK_NAMED_PARAM(addValue, 5k_to_20k, 250, 5000, 20000)
|
||||
*/
|
||||
#define BENCHMARK_NAMED_PARAM(name, param_name, ...) \
|
||||
BENCHMARK_IMPL( \
|
||||
FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)), \
|
||||
FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")", \
|
||||
iters, \
|
||||
      unsigned, \
      iters) { \
    name(iters, ## __VA_ARGS__); \
#define BENCHMARK_NAMED_PARAM(name, param_name, ...) \
  BENCHMARK_IMPL( \
      FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)), \
      FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")", \
      iters, \
      unsigned, \
      iters) { \
    name(iters, ##__VA_ARGS__); \
  }

/**
 * Same as BENCHMARK_NAMED_PARAM, but allows one to return the actual number
 * of iterations that have been run.
 */
#define BENCHMARK_NAMED_PARAM_MULTI(name, param_name, ...) \
  BENCHMARK_MULTI_IMPL( \
      FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)), \
      FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")", \
      unsigned, \
      iters) { \
    return name(iters, ## __VA_ARGS__); \
#define BENCHMARK_NAMED_PARAM_MULTI(name, param_name, ...) \
  BENCHMARK_MULTI_IMPL( \
      FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)), \
      FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")", \
      unsigned, \
      iters) { \
    return name(iters, ##__VA_ARGS__); \
  }

/**
@@ -486,71 +493,71 @@ auto makeUnpredictable(T& datum) -> typename std::enable_if<
 * baseline. Another BENCHMARK() occurrence effectively establishes a
 * new baseline.
 */
#define BENCHMARK_RELATIVE(name, ...) \
  BENCHMARK_IMPL( \
      name, \
      "%" FB_STRINGIZE(name), \
      FB_ARG_2_OR_1(1, ## __VA_ARGS__), \
      FB_ONE_OR_NONE(unsigned, ## __VA_ARGS__), \
      __VA_ARGS__)
#define BENCHMARK_RELATIVE(name, ...) \
  BENCHMARK_IMPL( \
      name, \
      "%" FB_STRINGIZE(name), \
      FB_ARG_2_OR_1(1, ##__VA_ARGS__), \
      FB_ONE_OR_NONE(unsigned, ##__VA_ARGS__), \
      __VA_ARGS__)

/**
 * Same as BENCHMARK_RELATIVE, but allows one to return the actual number
 * of iterations that have been run.
 */
#define BENCHMARK_RELATIVE_MULTI(name, ...) \
  BENCHMARK_MULTI_IMPL( \
      name, \
      "%" FB_STRINGIZE(name), \
      FB_ONE_OR_NONE(unsigned, ## __VA_ARGS__), \
      __VA_ARGS__)
#define BENCHMARK_RELATIVE_MULTI(name, ...) \
  BENCHMARK_MULTI_IMPL( \
      name, \
      "%" FB_STRINGIZE(name), \
      FB_ONE_OR_NONE(unsigned, ##__VA_ARGS__), \
      __VA_ARGS__)

/**
 * A combination of BENCHMARK_RELATIVE and BENCHMARK_PARAM.
 */
#define BENCHMARK_RELATIVE_PARAM(name, param) \
#define BENCHMARK_RELATIVE_PARAM(name, param) \
  BENCHMARK_RELATIVE_NAMED_PARAM(name, param, param)

/**
 * Same as BENCHMARK_RELATIVE_PARAM, but allows one to return the actual
 * number of iterations that have been run.
 */
#define BENCHMARK_RELATIVE_PARAM_MULTI(name, param) \
#define BENCHMARK_RELATIVE_PARAM_MULTI(name, param) \
  BENCHMARK_RELATIVE_NAMED_PARAM_MULTI(name, param, param)

/**
 * A combination of BENCHMARK_RELATIVE and BENCHMARK_NAMED_PARAM.
 */
#define BENCHMARK_RELATIVE_NAMED_PARAM(name, param_name, ...) \
  BENCHMARK_IMPL( \
      FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)), \
      "%" FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")", \
      iters, \
      unsigned, \
      iters) { \
    name(iters, ## __VA_ARGS__); \
#define BENCHMARK_RELATIVE_NAMED_PARAM(name, param_name, ...) \
  BENCHMARK_IMPL( \
      FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)), \
      "%" FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")", \
      iters, \
      unsigned, \
      iters) { \
    name(iters, ##__VA_ARGS__); \
  }

/**
 * Same as BENCHMARK_RELATIVE_NAMED_PARAM, but allows one to return the
 * actual number of iterations that have been run.
 */
#define BENCHMARK_RELATIVE_NAMED_PARAM_MULTI(name, param_name, ...) \
  BENCHMARK_MULTI_IMPL( \
      FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)), \
      "%" FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")", \
      unsigned, \
      iters) { \
    return name(iters, ## __VA_ARGS__); \
#define BENCHMARK_RELATIVE_NAMED_PARAM_MULTI(name, param_name, ...) \
  BENCHMARK_MULTI_IMPL( \
      FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)), \
      "%" FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")", \
      unsigned, \
      iters) { \
    return name(iters, ##__VA_ARGS__); \
  }

/**
 * Draws a line of dashes.
 */
#define BENCHMARK_DRAW_LINE() \
  static bool FB_ANONYMOUS_VARIABLE(follyBenchmarkUnused) = ( \
      ::folly::addBenchmark(__FILE__, "-", []() -> unsigned { return 0; }), \
      true);
#define BENCHMARK_DRAW_LINE() \
  static bool FB_ANONYMOUS_VARIABLE(follyBenchmarkUnused) = \
      (::folly::addBenchmark(__FILE__, "-", []() -> unsigned { return 0; }), \
       true)

/**
 * Allows execution of code that doesn't count toward the benchmark's
@@ -566,7 +573,7 @@ auto makeUnpredictable(T& datum) -> typename std::enable_if<
 *   }
 * }
 */
#define BENCHMARK_SUSPEND \
  if (auto FB_ANONYMOUS_VARIABLE(BENCHMARK_SUSPEND) = \
      ::folly::BenchmarkSuspender()) {} \
  else
#define BENCHMARK_SUSPEND \
  if (auto FB_ANONYMOUS_VARIABLE(BENCHMARK_SUSPEND) = \
          ::folly::BenchmarkSuspender()) { \
  } else
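For orientation, the macros above are used roughly like this — a minimal sketch, not taken from this repo, assuming folly's benchmark runner is linked in (the benchmark names are made up for illustration):

#include <folly/Benchmark.h>
#include <vector>

// BENCHMARK() establishes the baseline; BENCHMARK_RELATIVE() results are
// printed as a percentage of the preceding baseline.
BENCHMARK(vectorPushBack, iters) {
  std::vector<int> v;
  for (unsigned i = 0; i < iters; ++i) {
    v.push_back(int(i));
  }
}

BENCHMARK_RELATIVE(vectorPushBackReserved, iters) {
  std::vector<int> v;
  BENCHMARK_SUSPEND { // setup excluded from the measured time
    v.reserve(iters);
  }
  for (unsigned i = 0; i < iters; ++i) {
    v.push_back(int(i));
  }
}

BENCHMARK_DRAW_LINE();

int main() {
  folly::runBenchmarks();
  return 0;
}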
93  ios/Pods/Folly/folly/Bits.cpp  (generated)
@@ -1,93 +0,0 @@
/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <folly/Bits.h>

#include <folly/CpuId.h>
#include <folly/Portability.h>

// None of this is necessary if we're compiling for a target that supports
// popcnt, which includes MSVC
#if !defined(__POPCNT__) && !defined(_MSC_VER)
namespace {

int popcount_builtin(unsigned int x) {
  return __builtin_popcount(x);
}

int popcountll_builtin(unsigned long long x) {
  return __builtin_popcountll(x);
}

#if FOLLY_HAVE_IFUNC && !defined(FOLLY_SANITIZE_ADDRESS)

// Strictly speaking, these versions of popcount are usable without ifunc
// support. However, we would have to check, via CpuId, if the processor
// implements the popcnt instruction first, which is what we use ifunc for.
int popcount_inst(unsigned int x) {
  int n;
  asm ("popcntl %1, %0" : "=r" (n) : "r" (x));
  return n;
}

int popcountll_inst(unsigned long long x) {
  unsigned long long n;
  asm ("popcntq %1, %0" : "=r" (n) : "r" (x));
  return n;
}

typedef decltype(popcount_builtin) Type_popcount;
typedef decltype(popcountll_builtin) Type_popcountll;

// This function is called on startup to resolve folly::detail::popcount
extern "C" Type_popcount* folly_popcount_ifunc() {
  return folly::CpuId().popcnt() ? popcount_inst : popcount_builtin;
}

// This function is called on startup to resolve folly::detail::popcountll
extern "C" Type_popcountll* folly_popcountll_ifunc() {
  return folly::CpuId().popcnt() ? popcountll_inst : popcountll_builtin;
}

#endif // FOLLY_HAVE_IFUNC && !defined(FOLLY_SANITIZE_ADDRESS)

} // namespace

namespace folly {
namespace detail {

// Call folly_popcount_ifunc on startup to resolve to either popcount_inst
// or popcount_builtin
int popcount(unsigned int x)
#if FOLLY_HAVE_IFUNC && !defined(FOLLY_SANITIZE_ADDRESS)
    __attribute__((__ifunc__("folly_popcount_ifunc")));
#else
{ return popcount_builtin(x); }
#endif

// Call folly_popcount_ifunc on startup to resolve to either popcountll_inst
// or popcountll_builtin
int popcountll(unsigned long long x)
#if FOLLY_HAVE_IFUNC && !defined(FOLLY_SANITIZE_ADDRESS)
    __attribute__((__ifunc__("folly_popcountll_ifunc")));
#else
{ return popcountll_builtin(x); }
#endif

} // namespace detail
} // namespace folly

#endif /* !__POPCNT__ */
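The deleted Bits.cpp above is a compact illustration of GNU ifunc dispatch: a resolver runs once at load time and picks an implementation based on CPU features. A standalone sketch of the same pattern, with hypothetical function names (requires GCC on an ELF target with ifunc support):

static int popcount_generic(unsigned x) {
  return __builtin_popcount(x); // portable fallback
}

static int popcount_fast(unsigned x) {
  return __builtin_popcount(x); // stand-in for a popcnt-instruction version
}

// The resolver is called by the dynamic loader before main() and returns
// the function the symbol should bind to.
extern "C" int (*resolve_popcount(void))(unsigned) {
  __builtin_cpu_init(); // must precede __builtin_cpu_supports in a resolver
  return __builtin_cpu_supports("popcnt") ? popcount_fast : popcount_generic;
}

int my_popcount(unsigned x) __attribute__((ifunc("resolve_popcount")));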
558  ios/Pods/Folly/folly/Bits.h  (generated)
@@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2011-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -14,558 +14,4 @@
 * limitations under the License.
 */

/**
 * Various low-level, bit-manipulation routines.
 *
 * findFirstSet(x)  [constexpr]
 *    find first (least significant) bit set in a value of an integral type,
 *    1-based (like ffs()). 0 = no bits are set (x == 0)
 *
 * findLastSet(x)  [constexpr]
 *    find last (most significant) bit set in a value of an integral type,
 *    1-based. 0 = no bits are set (x == 0)
 *    for x != 0, findLastSet(x) == 1 + floor(log2(x))
 *
 * nextPowTwo(x)  [constexpr]
 *    Finds the next power of two >= x.
 *
 * isPowTwo(x)  [constexpr]
 *    return true iff x is a power of two
 *
 * popcount(x)
 *    return the number of 1 bits in x
 *
 * Endian
 *    convert between native, big, and little endian representation
 *    Endian::big(x)      big <-> native
 *    Endian::little(x)   little <-> native
 *    Endian::swap(x)     big <-> little
 *
 * BitIterator
 *    Wrapper around an iterator over an integral type that iterates
 *    over its underlying bits in MSb to LSb order
 *
 * findFirstSet(BitIterator begin, BitIterator end)
 *    return a BitIterator pointing to the first 1 bit in [begin, end), or
 *    end if all bits in [begin, end) are 0
 *
 * @author Tudor Bosman (tudorb@fb.com)
 */
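To make the contract above concrete, a small usage sketch (my own example, not part of the header):

#include <folly/Bits.h>
#include <cassert>

void bitsDemo() {
  assert(folly::findFirstSet(8u) == 4);   // 1-based, like ffs(); 8 == 0b1000
  assert(folly::findLastSet(10u) == 4);   // 1 + floor(log2(10))
  assert(folly::nextPowTwo(10u) == 16u);
  assert(folly::isPowTwo(16u));
  assert(folly::popcount(0xffu) == 8);
  uint32_t wire = folly::Endian::big(0x12345678u); // native <-> big-endian
  (void)wire;
}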
#pragma once

#if !defined(__clang__) && !(defined(_MSC_VER) && (_MSC_VER < 1900))
#define FOLLY_INTRINSIC_CONSTEXPR constexpr
#else
// GCC and MSVC 2015+ are the only compilers with
// intrinsics constexpr.
#define FOLLY_INTRINSIC_CONSTEXPR const
#endif

#include <folly/Portability.h>
#include <folly/portability/Builtins.h>

#include <folly/Assume.h>
#include <folly/detail/BitsDetail.h>
#include <folly/detail/BitIteratorDetail.h>
#include <folly/Likely.h>

#if FOLLY_HAVE_BYTESWAP_H
# include <byteswap.h>
#endif

#include <cassert>
#include <cinttypes>
#include <iterator>
#include <limits>
#include <type_traits>
#include <boost/iterator/iterator_adaptor.hpp>
#include <stdint.h>

namespace folly {

// Generate overloads for findFirstSet as wrappers around
// appropriate ffs, ffsl, ffsll gcc builtins
template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
  (std::is_integral<T>::value &&
   std::is_unsigned<T>::value &&
   sizeof(T) <= sizeof(unsigned int)),
  unsigned int>::type
findFirstSet(T x) {
  return __builtin_ffs(x);
}

template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
  (std::is_integral<T>::value &&
   std::is_unsigned<T>::value &&
   sizeof(T) > sizeof(unsigned int) &&
   sizeof(T) <= sizeof(unsigned long)),
  unsigned int>::type
findFirstSet(T x) {
  return __builtin_ffsl(x);
}

template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
  (std::is_integral<T>::value &&
   std::is_unsigned<T>::value &&
   sizeof(T) > sizeof(unsigned long) &&
   sizeof(T) <= sizeof(unsigned long long)),
  unsigned int>::type
findFirstSet(T x) {
  return __builtin_ffsll(x);
}

template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
  (std::is_integral<T>::value && std::is_signed<T>::value),
  unsigned int>::type
findFirstSet(T x) {
  // Note that conversion from a signed type to the corresponding unsigned
  // type is technically implementation-defined, but will likely work
  // on any implementation that uses two's complement.
  return findFirstSet(static_cast<typename std::make_unsigned<T>::type>(x));
}

// findLastSet: return the 1-based index of the highest bit set
// for x > 0, findLastSet(x) == 1 + floor(log2(x))
template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
  (std::is_integral<T>::value &&
   std::is_unsigned<T>::value &&
   sizeof(T) <= sizeof(unsigned int)),
  unsigned int>::type
findLastSet(T x) {
  // If X is a power of two X - Y = ((X - 1) ^ Y) + 1. Doing this transformation
  // allows GCC to remove its own xor that it adds to implement clz using bsr
  return x ? ((8 * sizeof(unsigned int) - 1) ^ __builtin_clz(x)) + 1 : 0;
}

template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
  (std::is_integral<T>::value &&
   std::is_unsigned<T>::value &&
   sizeof(T) > sizeof(unsigned int) &&
   sizeof(T) <= sizeof(unsigned long)),
  unsigned int>::type
findLastSet(T x) {
  return x ? ((8 * sizeof(unsigned long) - 1) ^ __builtin_clzl(x)) + 1 : 0;
}

template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
  (std::is_integral<T>::value &&
   std::is_unsigned<T>::value &&
   sizeof(T) > sizeof(unsigned long) &&
   sizeof(T) <= sizeof(unsigned long long)),
  unsigned int>::type
findLastSet(T x) {
  return x ? ((8 * sizeof(unsigned long long) - 1) ^ __builtin_clzll(x)) + 1
           : 0;
}

template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
  (std::is_integral<T>::value &&
   std::is_signed<T>::value),
  unsigned int>::type
findLastSet(T x) {
  return findLastSet(static_cast<typename std::make_unsigned<T>::type>(x));
}

template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
  std::is_integral<T>::value && std::is_unsigned<T>::value,
  T>::type
nextPowTwo(T v) {
  return v ? (T(1) << findLastSet(v - 1)) : 1;
}

template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR typename std::
    enable_if<std::is_integral<T>::value && std::is_unsigned<T>::value, T>::type
    prevPowTwo(T v) {
  return v ? (T(1) << (findLastSet(v) - 1)) : 0;
}

template <class T>
inline constexpr typename std::enable_if<
  std::is_integral<T>::value && std::is_unsigned<T>::value,
  bool>::type
isPowTwo(T v) {
  return (v != 0) && !(v & (v - 1));
}

/**
 * Population count
 */
template <class T>
inline typename std::enable_if<
  (std::is_integral<T>::value &&
   std::is_unsigned<T>::value &&
   sizeof(T) <= sizeof(unsigned int)),
  size_t>::type
popcount(T x) {
  return detail::popcount(x);
}

template <class T>
inline typename std::enable_if<
  (std::is_integral<T>::value &&
   std::is_unsigned<T>::value &&
   sizeof(T) > sizeof(unsigned int) &&
   sizeof(T) <= sizeof(unsigned long long)),
  size_t>::type
popcount(T x) {
  return detail::popcountll(x);
}

/**
 * Endianness detection and manipulation primitives.
 */
namespace detail {

template <class T>
struct EndianIntBase {
 public:
  static T swap(T x);
};

#ifndef _MSC_VER

/**
 * If we have the bswap_16 macro from byteswap.h, use it; otherwise, provide our
 * own definition.
 */
#ifdef bswap_16
# define our_bswap16 bswap_16
#else

template <class Int16>
inline constexpr typename std::enable_if<
  sizeof(Int16) == 2,
  Int16>::type
our_bswap16(Int16 x) {
  return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
}
#endif

#endif

#define FB_GEN(t, fn) \
  template <> inline t EndianIntBase<t>::swap(t x) { return fn(x); }

// fn(x) expands to (x) if the second argument is empty, which is exactly
// what we want for [u]int8_t. Also, gcc 4.7 on Intel doesn't have
// __builtin_bswap16 for some reason, so we have to provide our own.
FB_GEN( int8_t,)
FB_GEN(uint8_t,)
#ifdef _MSC_VER
FB_GEN( int64_t, _byteswap_uint64)
FB_GEN(uint64_t, _byteswap_uint64)
FB_GEN( int32_t, _byteswap_ulong)
FB_GEN(uint32_t, _byteswap_ulong)
FB_GEN( int16_t, _byteswap_ushort)
FB_GEN(uint16_t, _byteswap_ushort)
#else
FB_GEN( int64_t, __builtin_bswap64)
FB_GEN(uint64_t, __builtin_bswap64)
FB_GEN( int32_t, __builtin_bswap32)
FB_GEN(uint32_t, __builtin_bswap32)
FB_GEN( int16_t, our_bswap16)
FB_GEN(uint16_t, our_bswap16)
#endif

#undef FB_GEN

template <class T>
struct EndianInt : public EndianIntBase<T> {
 public:
  static T big(T x) {
    return kIsLittleEndian ? EndianInt::swap(x) : x;
  }
  static T little(T x) {
    return kIsBigEndian ? EndianInt::swap(x) : x;
  }
};

} // namespace detail

// big* convert between native and big-endian representations
// little* convert between native and little-endian representations
// swap* convert between big-endian and little-endian representations
//
// ntohs, htons == big16
// ntohl, htonl == big32
#define FB_GEN1(fn, t, sz) \
  static t fn##sz(t x) { return fn<t>(x); } \

#define FB_GEN2(t, sz) \
  FB_GEN1(swap, t, sz) \
  FB_GEN1(big, t, sz) \
  FB_GEN1(little, t, sz)

#define FB_GEN(sz) \
  FB_GEN2(uint##sz##_t, sz) \
  FB_GEN2(int##sz##_t, sz)

class Endian {
 public:
  enum class Order : uint8_t {
    LITTLE,
    BIG
  };

  static constexpr Order order = kIsLittleEndian ? Order::LITTLE : Order::BIG;

  template <class T> static T swap(T x) {
    return folly::detail::EndianInt<T>::swap(x);
  }
  template <class T> static T big(T x) {
    return folly::detail::EndianInt<T>::big(x);
  }
  template <class T> static T little(T x) {
    return folly::detail::EndianInt<T>::little(x);
  }

#if !defined(__ANDROID__)
  FB_GEN(64)
  FB_GEN(32)
  FB_GEN(16)
  FB_GEN(8)
#endif
};

#undef FB_GEN
#undef FB_GEN2
#undef FB_GEN1

/**
 * Fast bit iteration facility.
 */

template <class BaseIter> class BitIterator;
template <class BaseIter>
BitIterator<BaseIter> findFirstSet(BitIterator<BaseIter>,
                                   BitIterator<BaseIter>);
/**
 * Wrapper around an iterator over an integer type that iterates
 * over its underlying bits in LSb to MSb order.
 *
 * BitIterator models the same iterator concepts as the base iterator.
 */
template <class BaseIter>
class BitIterator
    : public bititerator_detail::BitIteratorBase<BaseIter>::type {
 public:
  /**
   * Return the number of bits in an element of the underlying iterator.
   */
  static unsigned int bitsPerBlock() {
    return std::numeric_limits<
      typename std::make_unsigned<
        typename std::iterator_traits<BaseIter>::value_type
      >::type
    >::digits;
  }

  /**
   * Construct a BitIterator that points at a given bit offset (default 0)
   * in iter.
   */
  explicit BitIterator(const BaseIter& iter, size_t bitOff = 0)
      : bititerator_detail::BitIteratorBase<BaseIter>::type(iter),
        bitOffset_(bitOff) {
    assert(bitOffset_ < bitsPerBlock());
  }

  size_t bitOffset() const {
    return bitOffset_;
  }

  void advanceToNextBlock() {
    bitOffset_ = 0;
    ++this->base_reference();
  }

  BitIterator& operator=(const BaseIter& other) {
    this->~BitIterator();
    new (this) BitIterator(other);
    return *this;
  }

 private:
  friend class boost::iterator_core_access;
  friend BitIterator findFirstSet<>(BitIterator, BitIterator);

  typedef bititerator_detail::BitReference<
      typename std::iterator_traits<BaseIter>::reference,
      typename std::iterator_traits<BaseIter>::value_type
    > BitRef;

  void advanceInBlock(size_t n) {
    bitOffset_ += n;
    assert(bitOffset_ < bitsPerBlock());
  }

  BitRef dereference() const {
    return BitRef(*this->base_reference(), bitOffset_);
  }

  void advance(ssize_t n) {
    size_t bpb = bitsPerBlock();
    ssize_t blocks = n / bpb;
    bitOffset_ += n % bpb;
    if (bitOffset_ >= bpb) {
      bitOffset_ -= bpb;
      ++blocks;
    }
    this->base_reference() += blocks;
  }

  void increment() {
    if (++bitOffset_ == bitsPerBlock()) {
      advanceToNextBlock();
    }
  }

  void decrement() {
    if (bitOffset_-- == 0) {
      bitOffset_ = bitsPerBlock() - 1;
      --this->base_reference();
    }
  }

  bool equal(const BitIterator& other) const {
    return (bitOffset_ == other.bitOffset_ &&
            this->base_reference() == other.base_reference());
  }

  ssize_t distance_to(const BitIterator& other) const {
    return
      (other.base_reference() - this->base_reference()) * bitsPerBlock() +
      other.bitOffset_ - bitOffset_;
  }

  unsigned int bitOffset_;
};

/**
 * Helper function, so you can write
 * auto bi = makeBitIterator(container.begin());
 */
template <class BaseIter>
BitIterator<BaseIter> makeBitIterator(const BaseIter& iter) {
  return BitIterator<BaseIter>(iter);
}

/**
 * Find first bit set in a range of bit iterators.
 * 4.5x faster than the obvious std::find(begin, end, true);
 */
template <class BaseIter>
BitIterator<BaseIter> findFirstSet(BitIterator<BaseIter> begin,
                                   BitIterator<BaseIter> end) {
  // shortcut to avoid ugly static_cast<>
  static const typename BaseIter::value_type one = 1;

  while (begin.base() != end.base()) {
    typename BaseIter::value_type v = *begin.base();
    // mask out the bits that don't matter (< begin.bitOffset)
    v &= ~((one << begin.bitOffset()) - 1);
    size_t firstSet = findFirstSet(v);
    if (firstSet) {
      --firstSet;  // now it's 0-based
      assert(firstSet >= begin.bitOffset());
      begin.advanceInBlock(firstSet - begin.bitOffset());
      return begin;
    }
    begin.advanceToNextBlock();
  }

  // now begin points to the same block as end
  if (end.bitOffset() != 0) {  // assume end is dereferenceable
    typename BaseIter::value_type v = *begin.base();
    // mask out the bits that don't matter (< begin.bitOffset)
    v &= ~((one << begin.bitOffset()) - 1);
    // mask out the bits that don't matter (>= end.bitOffset)
    v &= (one << end.bitOffset()) - 1;
    size_t firstSet = findFirstSet(v);
    if (firstSet) {
      --firstSet;  // now it's 0-based
      assert(firstSet >= begin.bitOffset());
      begin.advanceInBlock(firstSet - begin.bitOffset());
      return begin;
    }
  }

  return end;
}
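A short sketch of how the bit-iteration facility above is typically driven (illustrative only, not from the header):

#include <folly/Bits.h>
#include <vector>

void bitIterDemo() {
  std::vector<uint64_t> blocks(4, 0);
  blocks[2] = 1ULL << 5; // a single bit set somewhere in the range
  auto begin = folly::makeBitIterator(blocks.begin());
  auto end = folly::makeBitIterator(blocks.end());
  auto it = folly::findFirstSet(begin, end);
  bool found = (it != end); // true: it points at bit 5 of blocks[2]
  (void)found;
}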
template <class T, class Enable=void> struct Unaligned;

/**
 * Representation of an unaligned value of a POD type.
 */
FOLLY_PACK_PUSH
template <class T>
struct Unaligned<
    T,
    typename std::enable_if<std::is_pod<T>::value>::type> {
  Unaligned() = default;  // uninitialized
  /* implicit */ Unaligned(T v) : value(v) { }
  T value;
} FOLLY_PACK_ATTR;
FOLLY_PACK_POP

/**
 * Read an unaligned value of type T and return it.
 */
template <class T>
inline T loadUnaligned(const void* p) {
  static_assert(sizeof(Unaligned<T>) == sizeof(T), "Invalid unaligned size");
  static_assert(alignof(Unaligned<T>) == 1, "Invalid alignment");
  if (kHasUnalignedAccess) {
    return static_cast<const Unaligned<T>*>(p)->value;
  } else {
    T value;
    memcpy(&value, p, sizeof(T));
    return value;
  }
}

/**
 * Write an unaligned value of type T.
 */
template <class T>
inline void storeUnaligned(void* p, T value) {
  static_assert(sizeof(Unaligned<T>) == sizeof(T), "Invalid unaligned size");
  static_assert(alignof(Unaligned<T>) == 1, "Invalid alignment");
  if (kHasUnalignedAccess) {
    // Prior to C++14, the spec says that a placement new like this
    // is required to check that p is not nullptr, and to do nothing
    // if p is a nullptr. By assuming it's not a nullptr, we get a
    // nice loud segfault in optimized builds if p is nullptr, rather
    // than just silently doing nothing.
    folly::assume(p != nullptr);
    new (p) Unaligned<T>(value);
  } else {
    memcpy(p, &value, sizeof(T));
  }
}

} // namespace folly
#include <folly/lang/Bits.h> // @shim
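For example, a little-endian wire format can be decoded safely regardless of pointer alignment by combining the two facilities above — a sketch, with a hypothetical readLE32 helper:

#include <folly/Bits.h>

uint32_t readLE32(const unsigned char* p) {
  // memcpy fallback on platforms without unaligned access, direct load elsewhere
  uint32_t raw = folly::loadUnaligned<uint32_t>(p);
  return folly::Endian::little(raw); // bytes are little-endian on the wire
}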
199  ios/Pods/Folly/folly/CPortability.h  (generated)
@@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2013-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -19,91 +19,176 @@
/* These definitions are in a separate file so that they
 * may be included from C- as well as C++-based projects. */

#include <folly/portability/Config.h>

/**
 * Portable version check.
 */
#ifndef __GNUC_PREREQ
# if defined __GNUC__ && defined __GNUC_MINOR__
#if defined __GNUC__ && defined __GNUC_MINOR__
/* nolint */
#  define __GNUC_PREREQ(maj, min) ((__GNUC__ << 16) + __GNUC_MINOR__ >= \
                                   ((maj) << 16) + (min))
# else
#define __GNUC_PREREQ(maj, min) \
  ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min))
#else
/* nolint */
#  define __GNUC_PREREQ(maj, min) 0
# endif
#define __GNUC_PREREQ(maj, min) 0
#endif
#endif

/* Define a convenience macro to test when address sanitizer is being used
 * across the different compilers (e.g. clang, gcc) */
#if defined(__clang__)
# if __has_feature(address_sanitizer)
#  define FOLLY_SANITIZE_ADDRESS 1
# endif
#elif defined (__GNUC__) && \
      (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 8)) || (__GNUC__ >= 5)) && \
      __SANITIZE_ADDRESS__
# define FOLLY_SANITIZE_ADDRESS 1
// portable version check for clang
#ifndef __CLANG_PREREQ
#if defined __clang__ && defined __clang_major__ && defined __clang_minor__
/* nolint */
#define __CLANG_PREREQ(maj, min) \
  ((__clang_major__ << 16) + __clang_minor__ >= ((maj) << 16) + (min))
#else
/* nolint */
#define __CLANG_PREREQ(maj, min) 0
#endif
#endif

#if defined(__has_builtin)
#define FOLLY_HAS_BUILTIN(...) __has_builtin(__VA_ARGS__)
#else
#define FOLLY_HAS_BUILTIN(...) 0
#endif

#if defined(__has_feature)
#define FOLLY_HAS_FEATURE(...) __has_feature(__VA_ARGS__)
#else
#define FOLLY_HAS_FEATURE(...) 0
#endif

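These wrappers exist so feature tests stay valid on compilers that lack __has_builtin/__has_feature entirely: there they expand to 0 instead of breaking the preprocessor. A typical (hypothetical) use:

#include <folly/CPortability.h>

#if FOLLY_HAS_BUILTIN(__builtin_unreachable)
#define MY_UNREACHABLE() __builtin_unreachable()
#else
#define MY_UNREACHABLE() ((void)0)
#endif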
/* FOLLY_SANITIZE_ADDRESS is defined to 1 if the current compilation unit
 * is being compiled with ASAN enabled.
 *
 * Beware when using this macro in a header file: this macro may change values
 * across compilation units if some libraries are built with ASAN enabled
 * and some built with ASAN disabled. For instance, this may occur, if folly
 * itself was compiled without ASAN but a downstream project that uses folly is
 * compiling with ASAN enabled.
 *
 * Use FOLLY_ASAN_ENABLED (defined in folly-config.h) to check if folly itself
 * was compiled with ASAN enabled.
 */
#if FOLLY_HAS_FEATURE(address_sanitizer) || __SANITIZE_ADDRESS__
#define FOLLY_SANITIZE_ADDRESS 1
#endif

/* Define attribute wrapper for function attribute used to disable
 * address sanitizer instrumentation. Unfortunately, this attribute
 * has issues when inlining is used, so disable that as well. */
#ifdef FOLLY_SANITIZE_ADDRESS
# if defined(__clang__)
#  if __has_attribute(__no_sanitize__)
#   define FOLLY_DISABLE_ADDRESS_SANITIZER \
      __attribute__((__no_sanitize__("address"), __noinline__))
#  elif __has_attribute(__no_address_safety_analysis__)
#   define FOLLY_DISABLE_ADDRESS_SANITIZER \
      __attribute__((__no_address_safety_analysis__, __noinline__))
#  elif __has_attribute(__no_sanitize_address__)
#   define FOLLY_DISABLE_ADDRESS_SANITIZER \
      __attribute__((__no_sanitize_address__, __noinline__))
#  endif
# elif defined(__GNUC__)
#  define FOLLY_DISABLE_ADDRESS_SANITIZER \
     __attribute__((__no_address_safety_analysis__, __noinline__))
# endif
#if defined(__clang__)
#if __has_attribute(__no_sanitize__)
#define FOLLY_DISABLE_ADDRESS_SANITIZER \
  __attribute__((__no_sanitize__("address"), __noinline__))
#elif __has_attribute(__no_address_safety_analysis__)
#define FOLLY_DISABLE_ADDRESS_SANITIZER \
  __attribute__((__no_address_safety_analysis__, __noinline__))
#elif __has_attribute(__no_sanitize_address__)
#define FOLLY_DISABLE_ADDRESS_SANITIZER \
  __attribute__((__no_sanitize_address__, __noinline__))
#endif
#elif defined(__GNUC__)
#define FOLLY_DISABLE_ADDRESS_SANITIZER \
  __attribute__((__no_address_safety_analysis__, __noinline__))
#endif
#endif
#ifndef FOLLY_DISABLE_ADDRESS_SANITIZER
# define FOLLY_DISABLE_ADDRESS_SANITIZER
#define FOLLY_DISABLE_ADDRESS_SANITIZER
#endif

/* Define a convenience macro to test when thread sanitizer is being used
 * across the different compilers (e.g. clang, gcc) */
#if defined(__clang__)
# if __has_feature(thread_sanitizer)
#  define FOLLY_SANITIZE_THREAD 1
# endif
#elif defined(__GNUC__) && __SANITIZE_THREAD__
# define FOLLY_SANITIZE_THREAD 1
#if FOLLY_HAS_FEATURE(thread_sanitizer) || __SANITIZE_THREAD__
#define FOLLY_SANITIZE_THREAD 1
#endif

/**
 * ASAN/MSAN/TSAN define pre-processor symbols:
 * ADDRESS_SANITIZER/MEMORY_SANITIZER/THREAD_SANITIZER.
 *
 * UBSAN doesn't define anything and makes it hard to
 * conditionally compile.
 *
 * The build system should define UNDEFINED_SANITIZER=1 when UBSAN is
 * used as folly whitelists some functions.
 * Define a convenience macro to test when ASAN, UBSAN or TSAN sanitizer are
 * being used
 */
#if UNDEFINED_SANITIZER
# define UBSAN_DISABLE(x) __attribute__((no_sanitize(x)))
#if defined(FOLLY_SANITIZE_ADDRESS) || defined(FOLLY_SANITIZE_THREAD)
#define FOLLY_SANITIZE 1
#endif

#if FOLLY_SANITIZE
#define FOLLY_DISABLE_UNDEFINED_BEHAVIOR_SANITIZER(...) \
  __attribute__((no_sanitize(__VA_ARGS__)))
#else
# define UBSAN_DISABLE(x)
#endif // UNDEFINED_SANITIZER
#define FOLLY_DISABLE_UNDEFINED_BEHAVIOR_SANITIZER(...)
#endif // FOLLY_SANITIZE

/**
 * Macro for marking functions as having public visibility.
 */
#if defined(__GNUC__)
# if __GNUC_PREREQ(4, 9)
#  define FOLLY_EXPORT [[gnu::visibility("default")]]
# else
#  define FOLLY_EXPORT __attribute__((__visibility__("default")))
# endif
#if __GNUC_PREREQ(4, 9)
#define FOLLY_EXPORT [[gnu::visibility("default")]]
#else
# define FOLLY_EXPORT
#define FOLLY_EXPORT __attribute__((__visibility__("default")))
#endif
#else
#define FOLLY_EXPORT
#endif

// noinline
#ifdef _MSC_VER
#define FOLLY_NOINLINE __declspec(noinline)
#elif defined(__clang__) || defined(__GNUC__)
#define FOLLY_NOINLINE __attribute__((__noinline__))
#else
#define FOLLY_NOINLINE
#endif

// always inline
#ifdef _MSC_VER
#define FOLLY_ALWAYS_INLINE __forceinline
#elif defined(__clang__) || defined(__GNUC__)
#define FOLLY_ALWAYS_INLINE inline __attribute__((__always_inline__))
#else
#define FOLLY_ALWAYS_INLINE inline
#endif

// attribute hidden
#if _MSC_VER
#define FOLLY_ATTR_VISIBILITY_HIDDEN
#elif defined(__clang__) || defined(__GNUC__)
#define FOLLY_ATTR_VISIBILITY_HIDDEN __attribute__((__visibility__("hidden")))
#else
#define FOLLY_ATTR_VISIBILITY_HIDDEN
#endif

// An attribute for marking symbols as weak, if supported
#if FOLLY_HAVE_WEAK_SYMBOLS
#define FOLLY_ATTR_WEAK __attribute__((__weak__))
#else
#define FOLLY_ATTR_WEAK
#endif

// Microsoft ABI version (can be overridden manually if necessary)
#ifndef FOLLY_MICROSOFT_ABI_VER
#ifdef _MSC_VER
#define FOLLY_MICROSOFT_ABI_VER _MSC_VER
#endif
#endif

// These functions are defined by the TSAN runtime library and enable
// annotating mutexes for TSAN.
extern "C" FOLLY_ATTR_WEAK void
AnnotateRWLockCreate(const char* f, int l, const volatile void* addr);
extern "C" FOLLY_ATTR_WEAK void
AnnotateRWLockCreateStatic(const char* f, int l, const volatile void* addr);
extern "C" FOLLY_ATTR_WEAK void
AnnotateRWLockDestroy(const char* f, int l, const volatile void* addr);
extern "C" FOLLY_ATTR_WEAK void
AnnotateRWLockAcquired(const char* f, int l, const volatile void* addr, long w);
extern "C" FOLLY_ATTR_WEAK void
AnnotateRWLockReleased(const char* f, int l, const volatile void* addr, long w);
extern "C" FOLLY_ATTR_WEAK void AnnotateBenignRaceSized(
    const char* f,
    int l,
    const volatile void* addr,
    long size,
    const char* desc);

53  ios/Pods/Folly/folly/CachelinePadded.h  (generated)
@@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2016-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -16,48 +16,37 @@

#pragma once

#include <folly/detail/CachelinePaddedImpl.h>
#include <cstddef>
#include <utility>

#include <folly/lang/Align.h>

namespace folly {

/**
 * Holds a type T, in addition to enough padding to round the size up to the
 * next multiple of the false sharing range used by folly.
 * Holds a type T, in addition to enough padding to ensure that it isn't subject
 * to false sharing within the range used by folly.
 *
 * If T is standard-layout, then casting a T* you get from this class to a
 * CachelinePadded<T>* is safe.
 *
 * This class handles padding, but imperfectly handles alignment. (Note that
 * alignment matters for false-sharing: imagine a cacheline size of 64, and two
 * adjacent 64-byte objects, with the first starting at an offset of 32. The
 * last 32 bytes of the first object share a cacheline with the first 32 bytes
 * of the second.). We alignas this class to be at least cacheline-sized, but
 * it's implementation-defined what that means (since a cacheline is almost
 * certainly larger than the maximum natural alignment). The following should be
 * true for recent compilers on common architectures:
 *
 * For heap objects, alignment needs to be handled at the allocator level, such
 * as with posix_memalign (this isn't necessary with jemalloc, which aligns
 * objects that are a multiple of cacheline size to a cacheline).
 *
 * For static and stack objects, the alignment should be obeyed, and no specific
 * intervention is necessary.
 * If `sizeof(T) <= alignof(T)` then the inner `T` will be entirely within one
 * false sharing range (AKA cache line).
 */
template <typename T>
class CachelinePadded {
  static_assert(
      alignof(T) <= max_align_v,
      "CachelinePadded does not support over-aligned types.");

 public:
  template <typename... Args>
  explicit CachelinePadded(Args&&... args)
      : impl_(std::forward<Args>(args)...) {}

  CachelinePadded() {}
      : inner_(std::forward<Args>(args)...) {}

  T* get() {
    return &impl_.item;
    return &inner_;
  }

  const T* get() const {
    return &impl_.item;
    return &inner_;
  }

  T* operator->() {
@@ -77,6 +66,12 @@ class CachelinePadded {
  }

 private:
  detail::CachelinePaddedImpl<T> impl_;
  static constexpr size_t paddingSize() noexcept {
    return hardware_destructive_interference_size -
        (alignof(T) % hardware_destructive_interference_size);
  }
  char paddingPre_[paddingSize()];
  T inner_;
  char paddingPost_[paddingSize()];
};
}
} // namespace folly
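A usage sketch of the padded wrapper above (my own example): two hot counters placed in separate false-sharing ranges, so writer threads don't contend on one cache line.

#include <folly/CachelinePadded.h>
#include <atomic>
#include <cstdint>

struct Counters {
  folly::CachelinePadded<std::atomic<uint64_t>> produced;
  folly::CachelinePadded<std::atomic<uint64_t>> consumed;
};

void onProduce(Counters& c) {
  // operator-> forwards to the inner T
  c.produced->fetch_add(1, std::memory_order_relaxed);
}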
79  ios/Pods/Folly/folly/CallOnce.h  (generated)
@@ -1,79 +0,0 @@
/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Drop-in replacement for std::call_once() with a fast path, which the GCC
 * implementation lacks. The tradeoff is a slightly larger `once_flag' struct
 * (8 bytes vs 4 bytes with GCC on Linux/x64).
 *
 * $ call_once_test --benchmark --bm_min_iters=100000000 --threads=16
 * ============================================================================
 * folly/test/CallOnceTest.cpp            relative  time/iter  iters/s
 * ============================================================================
 * StdCallOnceBench                                   3.54ns   282.82M
 * FollyCallOnceBench                               698.48ps     1.43G
 * ============================================================================
 */

#pragma once

#include <atomic>
#include <mutex>
#include <utility>

#include <folly/Likely.h>
#include <folly/Portability.h>

namespace folly {

class once_flag {
 public:
  constexpr once_flag() noexcept = default;
  once_flag(const once_flag&) = delete;
  once_flag& operator=(const once_flag&) = delete;

  template <typename Callable, class... Args>
  friend void call_once(once_flag& flag, Callable&& f, Args&&... args);
  template <typename Callable, class... Args>
  friend void call_once_impl_no_inline(once_flag& flag,
                                       Callable&& f,
                                       Args&&... args);

 private:
  std::atomic<bool> called_{false};
  std::once_flag std_once_flag_;
};

template <class Callable, class... Args>
void FOLLY_ALWAYS_INLINE
call_once(once_flag& flag, Callable&& f, Args&&... args) {
  if (LIKELY(flag.called_.load(std::memory_order_acquire))) {
    return;
  }
  call_once_impl_no_inline(
      flag, std::forward<Callable>(f), std::forward<Args>(args)...);
}

// Implementation detail: out-of-line slow path
template <class Callable, class... Args>
void FOLLY_NOINLINE
call_once_impl_no_inline(once_flag& flag, Callable&& f, Args&&... args) {
  std::call_once(flag.std_once_flag_,
                 std::forward<Callable>(f),
                 std::forward<Args>(args)...);
  flag.called_.store(true, std::memory_order_release);
}
}
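The fast path above is a single acquire load once initialization has completed; only the first callers take the std::call_once slow path. A minimal usage sketch (note that in later folly releases the header moved to folly/synchronization/CallOnce.h):

#include <folly/CallOnce.h>

static folly::once_flag gInitFlag;

void ensureInitialized() {
  folly::call_once(gInitFlag, [] {
    // expensive one-time setup runs exactly once, even under races
  });
}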
190  ios/Pods/Folly/folly/Chrono.h  (generated, new file)
@@ -0,0 +1,190 @@
/*
 * Copyright 2017-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <chrono>
#include <stdexcept>
#include <type_traits>

#include <folly/Portability.h>
#include <folly/lang/Exception.h>
#include <folly/portability/Time.h>

/***
 * include or backport:
 * * std::chrono::ceil
 * * std::chrono::floor
 * * std::chrono::round
 */

#if __cpp_lib_chrono >= 201510 || _MSC_VER

namespace folly {
namespace chrono {

/* using override */ using std::chrono::ceil;
/* using override */ using std::chrono::floor;
/* using override */ using std::chrono::round;
} // namespace chrono
} // namespace folly

#else

namespace folly {
namespace chrono {

namespace detail {

// from: http://en.cppreference.com/w/cpp/chrono/duration/ceil, CC-BY-SA
template <typename T>
struct is_duration : std::false_type {};
template <typename Rep, typename Period>
struct is_duration<std::chrono::duration<Rep, Period>> : std::true_type {};

template <typename To, typename Duration>
constexpr To ceil_impl(Duration const& d, To const& t) {
  return t < d ? t + To{1} : t;
}

template <typename To, typename Duration>
constexpr To floor_impl(Duration const& d, To const& t) {
  return t > d ? t - To{1} : t;
}

template <typename To, typename Diff>
constexpr To round_impl(To const& t0, To const& t1, Diff diff0, Diff diff1) {
  return diff0 < diff1 ? t0 : diff1 < diff0 ? t1 : t0.count() & 1 ? t1 : t0;
}

template <typename To, typename Duration>
constexpr To round_impl(Duration const& d, To const& t0, To const& t1) {
  return round_impl(t0, t1, d - t0, t1 - d);
}

template <typename To, typename Duration>
constexpr To round_impl(Duration const& d, To const& t0) {
  return round_impl(d, t0, t0 + To{1});
}
} // namespace detail

// mimic: std::chrono::ceil, C++17
// from: http://en.cppreference.com/w/cpp/chrono/duration/ceil, CC-BY-SA
template <
    typename To,
    typename Rep,
    typename Period,
    typename = typename std::enable_if<detail::is_duration<To>::value>::type>
constexpr To ceil(std::chrono::duration<Rep, Period> const& d) {
  return detail::ceil_impl(d, std::chrono::duration_cast<To>(d));
}

// mimic: std::chrono::ceil, C++17
// from: http://en.cppreference.com/w/cpp/chrono/time_point/ceil, CC-BY-SA
template <
    typename To,
    typename Clock,
    typename Duration,
    typename = typename std::enable_if<detail::is_duration<To>::value>::type>
constexpr std::chrono::time_point<Clock, To> ceil(
    std::chrono::time_point<Clock, Duration> const& tp) {
  return std::chrono::time_point<Clock, To>{ceil<To>(tp.time_since_epoch())};
}

// mimic: std::chrono::floor, C++17
// from: http://en.cppreference.com/w/cpp/chrono/duration/floor, CC-BY-SA
template <
    typename To,
    typename Rep,
    typename Period,
    typename = typename std::enable_if<detail::is_duration<To>::value>::type>
constexpr To floor(std::chrono::duration<Rep, Period> const& d) {
  return detail::floor_impl(d, std::chrono::duration_cast<To>(d));
}

// mimic: std::chrono::floor, C++17
// from: http://en.cppreference.com/w/cpp/chrono/time_point/floor, CC-BY-SA
template <
    typename To,
    typename Clock,
    typename Duration,
    typename = typename std::enable_if<detail::is_duration<To>::value>::type>
constexpr std::chrono::time_point<Clock, To> floor(
    std::chrono::time_point<Clock, Duration> const& tp) {
  return std::chrono::time_point<Clock, To>{floor<To>(tp.time_since_epoch())};
}

// mimic: std::chrono::round, C++17
// from: http://en.cppreference.com/w/cpp/chrono/duration/round, CC-BY-SA
template <
    typename To,
    typename Rep,
    typename Period,
    typename = typename std::enable_if<
        detail::is_duration<To>::value &&
        !std::chrono::treat_as_floating_point<typename To::rep>::value>::type>
constexpr To round(std::chrono::duration<Rep, Period> const& d) {
  return detail::round_impl(d, floor<To>(d));
}

// mimic: std::chrono::round, C++17
// from: http://en.cppreference.com/w/cpp/chrono/time_point/round, CC-BY-SA
template <
    typename To,
    typename Clock,
    typename Duration,
    typename = typename std::enable_if<
        detail::is_duration<To>::value &&
        !std::chrono::treat_as_floating_point<typename To::rep>::value>::type>
constexpr std::chrono::time_point<Clock, To> round(
    std::chrono::time_point<Clock, Duration> const& tp) {
  return std::chrono::time_point<Clock, To>{round<To>(tp.time_since_epoch())};
}
} // namespace chrono
} // namespace folly

#endif

namespace folly {
namespace chrono {

struct coarse_steady_clock {
  using rep = std::chrono::milliseconds::rep;
  using period = std::chrono::milliseconds::period;
  using duration = std::chrono::duration<rep, period>;
  using time_point = std::chrono::time_point<coarse_steady_clock, duration>;
  constexpr static bool is_steady = true;

  static time_point now() {
#ifndef CLOCK_MONOTONIC_COARSE
    return time_point(std::chrono::duration_cast<duration>(
        std::chrono::steady_clock::now().time_since_epoch()));
#else
    timespec ts;
    auto ret = clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
    if (ret != 0) {
      throw_exception<std::runtime_error>(
          "Error using CLOCK_MONOTONIC_COARSE.");
    }
    return time_point(std::chrono::duration_cast<duration>(
        std::chrono::seconds(ts.tv_sec) +
        std::chrono::nanoseconds(ts.tv_nsec)));
#endif
  }
};

} // namespace chrono
} // namespace folly
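A quick sketch of the backported rounding helpers and the coarse clock above (illustrative only):

#include <folly/Chrono.h>

void chronoDemo() {
  using namespace std::chrono;
  milliseconds d(1500);
  auto up = folly::chrono::ceil<seconds>(d);      // 2s
  auto down = folly::chrono::floor<seconds>(d);   // 1s
  auto nearest = folly::chrono::round<seconds>(d); // 2s: an exact tie rounds to the even count
  (void)up; (void)down; (void)nearest;
  // Millisecond-granularity monotonic clock; cheaper than steady_clock where
  // CLOCK_MONOTONIC_COARSE is available.
  auto t = folly::chrono::coarse_steady_clock::now();
  (void)t;
}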
6
ios/Pods/Folly/folly/ClockGettimeWrappers.h
generated
6
ios/Pods/Folly/folly/ClockGettimeWrappers.h
generated
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2016 Facebook, Inc.
|
||||
* Copyright 2016-present Facebook, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@ -25,5 +25,5 @@ namespace chrono {
|
||||
|
||||
extern int (*clock_gettime)(clockid_t, timespec* ts);
|
||||
extern int64_t (*clock_gettime_ns)(clockid_t);
|
||||
}
|
||||
}
|
||||
} // namespace chrono
|
||||
} // namespace folly
|
||||
|
164
ios/Pods/Folly/folly/ConcurrentSkipList-inl.h
generated
164
ios/Pods/Folly/folly/ConcurrentSkipList-inl.h
generated
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2016 Facebook, Inc.
|
||||
* Copyright 2011-present Facebook, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@ -26,60 +26,68 @@
|
||||
#include <mutex>
|
||||
#include <type_traits>
|
||||
#include <vector>
|
||||
|
||||
#include <boost/noncopyable.hpp>
|
||||
#include <boost/random.hpp>
|
||||
#include <boost/type_traits.hpp>
|
||||
#include <glog/logging.h>
|
||||
|
||||
#include <folly/Memory.h>
|
||||
#include <folly/MicroSpinLock.h>
|
||||
#include <folly/ThreadLocal.h>
|
||||
#include <folly/synchronization/MicroSpinLock.h>
|
||||
|
||||
namespace folly { namespace detail {
|
||||
namespace folly {
|
||||
namespace detail {
|
||||
|
||||
template<typename ValT, typename NodeT> class csl_iterator;
|
||||
template <typename ValT, typename NodeT>
|
||||
class csl_iterator;
|
||||
|
||||
template<typename T>
|
||||
template <typename T>
|
||||
class SkipListNode : private boost::noncopyable {
|
||||
enum {
|
||||
enum : uint16_t {
|
||||
IS_HEAD_NODE = 1,
|
||||
MARKED_FOR_REMOVAL = (1 << 1),
|
||||
FULLY_LINKED = (1 << 2),
|
||||
};
|
||||
|
||||
public:
|
||||
typedef T value_type;
|
||||
|
||||
template<typename NodeAlloc, typename U,
|
||||
typename=typename std::enable_if<std::is_convertible<U, T>::value>::type>
|
||||
static SkipListNode* create(
|
||||
NodeAlloc& alloc, int height, U&& data, bool isHead = false) {
|
||||
template <
|
||||
typename NodeAlloc,
|
||||
typename U,
|
||||
typename =
|
||||
typename std::enable_if<std::is_convertible<U, T>::value>::type>
|
||||
static SkipListNode*
|
||||
create(NodeAlloc& alloc, int height, U&& data, bool isHead = false) {
|
||||
DCHECK(height >= 1 && height < 64) << height;
|
||||
|
||||
size_t size = sizeof(SkipListNode) +
|
||||
height * sizeof(std::atomic<SkipListNode*>);
|
||||
auto* node = static_cast<SkipListNode*>(alloc.allocate(size));
|
||||
size_t size =
|
||||
sizeof(SkipListNode) + height * sizeof(std::atomic<SkipListNode*>);
|
||||
auto storage = std::allocator_traits<NodeAlloc>::allocate(alloc, size);
|
||||
// do placement new
|
||||
new (node) SkipListNode(height, std::forward<U>(data), isHead);
|
||||
return node;
|
||||
return new (storage)
|
||||
SkipListNode(uint8_t(height), std::forward<U>(data), isHead);
|
||||
}
|
||||
|
||||
template<typename NodeAlloc>
|
||||
template <typename NodeAlloc>
|
||||
static void destroy(NodeAlloc& alloc, SkipListNode* node) {
|
||||
size_t size = sizeof(SkipListNode) +
|
||||
node->height_ * sizeof(std::atomic<SkipListNode*>);
|
||||
node->~SkipListNode();
|
||||
alloc.deallocate(node);
|
||||
std::allocator_traits<NodeAlloc>::deallocate(alloc, node, size);
|
||||
}
|
||||
|
||||
template<typename NodeAlloc>
|
||||
static constexpr bool destroyIsNoOp() {
|
||||
return IsArenaAllocator<NodeAlloc>::value &&
|
||||
boost::has_trivial_destructor<SkipListNode>::value;
|
||||
}
|
||||
template <typename NodeAlloc>
|
||||
struct DestroyIsNoOp : StrictConjunction<
|
||||
AllocatorHasTrivialDeallocate<NodeAlloc>,
|
||||
boost::has_trivial_destructor<SkipListNode>> {};
|
||||
|
||||
// copy the head node to a new head node assuming lock acquired
|
||||
SkipListNode* copyHead(SkipListNode* node) {
|
||||
DCHECK(node != nullptr && height_ > node->height_);
|
||||
setFlags(node->getFlags());
|
||||
for (int i = 0; i < node->height_; ++i) {
|
||||
for (uint8_t i = 0; i < node->height_; ++i) {
|
||||
setSkip(i, node->skip(i));
|
||||
}
|
||||
return this;
|
||||
@ -93,9 +101,9 @@ class SkipListNode : private boost::noncopyable {
|
||||
// next valid node as in the linked list
|
||||
SkipListNode* next() {
|
||||
SkipListNode* node;
|
||||
for (node = skip(0);
|
||||
(node != nullptr && node->markedForRemoval());
|
||||
node = node->skip(0)) {}
|
||||
for (node = skip(0); (node != nullptr && node->markedForRemoval());
|
||||
node = node->skip(0)) {
|
||||
}
|
||||
return node;
|
||||
}
|
||||
|
||||
@ -104,37 +112,53 @@ class SkipListNode : private boost::noncopyable {
|
||||
skip_[h].store(next, std::memory_order_release);
|
||||
}
|
||||
|
||||
value_type& data() { return data_; }
|
||||
const value_type& data() const { return data_; }
|
||||
int maxLayer() const { return height_ - 1; }
|
||||
int height() const { return height_; }
|
||||
value_type& data() {
|
||||
return data_;
|
||||
}
|
||||
const value_type& data() const {
|
||||
return data_;
|
||||
}
|
||||
int maxLayer() const {
|
||||
return height_ - 1;
|
||||
}
|
||||
int height() const {
|
||||
return height_;
|
||||
}
|
||||
|
||||
std::unique_lock<MicroSpinLock> acquireGuard() {
|
||||
return std::unique_lock<MicroSpinLock>(spinLock_);
|
||||
}
|
||||
|
||||
bool fullyLinked() const { return getFlags() & FULLY_LINKED; }
|
||||
bool markedForRemoval() const { return getFlags() & MARKED_FOR_REMOVAL; }
|
||||
bool isHeadNode() const { return getFlags() & IS_HEAD_NODE; }
|
||||
bool fullyLinked() const {
|
||||
return getFlags() & FULLY_LINKED;
|
||||
}
|
||||
bool markedForRemoval() const {
|
||||
return getFlags() & MARKED_FOR_REMOVAL;
|
||||
}
|
||||
bool isHeadNode() const {
|
||||
return getFlags() & IS_HEAD_NODE;
|
||||
}
|
||||
|
||||
void setIsHeadNode() {
|
||||
setFlags(getFlags() | IS_HEAD_NODE);
|
||||
setFlags(uint16_t(getFlags() | IS_HEAD_NODE));
|
||||
}
|
||||
void setFullyLinked() {
|
||||
setFlags(getFlags() | FULLY_LINKED);
|
||||
setFlags(uint16_t(getFlags() | FULLY_LINKED));
|
||||
}
|
||||
void setMarkedForRemoval() {
|
||||
setFlags(getFlags() | MARKED_FOR_REMOVAL);
|
||||
setFlags(uint16_t(getFlags() | MARKED_FOR_REMOVAL));
|
||||
}
|
||||
|
||||
private:
|
||||
// Note! this can only be called from create() as a placement new.
|
||||
template<typename U>
|
||||
SkipListNode(uint8_t height, U&& data, bool isHead) :
|
||||
height_(height), data_(std::forward<U>(data)) {
|
||||
template <typename U>
|
||||
SkipListNode(uint8_t height, U&& data, bool isHead)
|
||||
: height_(height), data_(std::forward<U>(data)) {
|
||||
spinLock_.init();
|
||||
setFlags(0);
|
||||
if (isHead) setIsHeadNode();
|
||||
if (isHead) {
|
||||
setIsHeadNode();
|
||||
}
// need to explicitly init the dynamic atomic pointer array
for (uint8_t i = 0; i < height_; ++i) {
new (&skip_[i]) std::atomic<SkipListNode*>(nullptr);
@ -169,9 +193,10 @@ class SkipListNode : private boost::noncopyable {

class SkipListRandomHeight {
enum { kMaxHeight = 64 };

public:
// make it a singleton.
static SkipListRandomHeight *instance() {
static SkipListRandomHeight* instance() {
static SkipListRandomHeight instance_;
return &instance_;
}
@ -193,7 +218,9 @@ class SkipListRandomHeight {
}

private:
SkipListRandomHeight() { initLookupTable(); }
SkipListRandomHeight() {
initLookupTable();
}

void initLookupTable() {
// set skip prob = 1/E
@ -208,9 +235,9 @@ class SkipListRandomHeight {
p *= kProb;
sizeLimit *= kProbInv;
lookupTable_[i] = lookupTable_[i - 1] + p;
sizeLimitTable_[i] = sizeLimit > kMaxSizeLimit ?
kMaxSizeLimit :
static_cast<size_t>(sizeLimit);
sizeLimitTable_[i] = sizeLimit > kMaxSizeLimit
? kMaxSizeLimit
: static_cast<size_t>(sizeLimit);
}
lookupTable_[kMaxHeight - 1] = 1;
sizeLimitTable_[kMaxHeight - 1] = kMaxSizeLimit;
@ -225,17 +252,24 @@ class SkipListRandomHeight {
size_t sizeLimitTable_[kMaxHeight];
};

template<typename NodeType, typename NodeAlloc, typename = void>
template <typename NodeType, typename NodeAlloc, typename = void>
class NodeRecycler;

template<typename NodeType, typename NodeAlloc>
class NodeRecycler<NodeType, NodeAlloc, typename std::enable_if<
!NodeType::template destroyIsNoOp<NodeAlloc>()>::type> {
template <typename NodeType, typename NodeAlloc>
class NodeRecycler<
NodeType,
NodeAlloc,
typename std::enable_if<
!NodeType::template DestroyIsNoOp<NodeAlloc>::value>::type> {
public:
explicit NodeRecycler(const NodeAlloc& alloc)
: refs_(0), dirty_(false), alloc_(alloc) { lock_.init(); }
: refs_(0), dirty_(false), alloc_(alloc) {
lock_.init();
}

explicit NodeRecycler() : refs_(0), dirty_(false) { lock_.init(); }
explicit NodeRecycler() : refs_(0), dirty_(false) {
lock_.init();
}

~NodeRecycler() {
CHECK_EQ(refs(), 0);
@ -249,7 +283,7 @@ class NodeRecycler<NodeType, NodeAlloc, typename std::enable_if<
void add(NodeType* node) {
std::lock_guard<MicroSpinLock> g(lock_);
if (nodes_.get() == nullptr) {
nodes_.reset(new std::vector<NodeType*>(1, node));
nodes_ = std::make_unique<std::vector<NodeType*>>(1, node);
} else {
nodes_->push_back(node);
}
@ -298,7 +332,9 @@ class NodeRecycler<NodeType, NodeAlloc, typename std::enable_if<
return refs_.fetch_add(-1, std::memory_order_relaxed);
}

NodeAlloc& alloc() { return alloc_; }
NodeAlloc& alloc() {
return alloc_;
}

private:
int refs() const {
@ -314,21 +350,27 @@ class NodeRecycler<NodeType, NodeAlloc, typename std::enable_if<

// In case of arena allocator, no recycling is necessary, and it's possible
// to save on ConcurrentSkipList size.
template<typename NodeType, typename NodeAlloc>
class NodeRecycler<NodeType, NodeAlloc, typename std::enable_if<
NodeType::template destroyIsNoOp<NodeAlloc>()>::type> {
template <typename NodeType, typename NodeAlloc>
class NodeRecycler<
NodeType,
NodeAlloc,
typename std::enable_if<
NodeType::template DestroyIsNoOp<NodeAlloc>::value>::type> {
public:
explicit NodeRecycler(const NodeAlloc& alloc) : alloc_(alloc) { }
explicit NodeRecycler(const NodeAlloc& alloc) : alloc_(alloc) {}

void addRef() { }
void releaseRef() { }
void addRef() {}
void releaseRef() {}

void add(NodeType* /* node */) {}

NodeAlloc& alloc() { return alloc_; }
NodeAlloc& alloc() {
return alloc_;
}

private:
NodeAlloc alloc_;
};

}} // namespaces
} // namespace detail
} // namespace folly
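The two NodeRecycler partial specializations above are selected through std::enable_if on the DestroyIsNoOp trait (renamed from the destroyIsNoOp() function in this upgrade): allocators whose per-node destroy is a no-op, such as arenas, get the trivial recycler. A minimal, self-contained sketch of that selection pattern, with illustrative names rather than Folly's:

#include <iostream>
#include <memory>
#include <type_traits>

// Illustrative trait: true when destroying a node through Alloc does nothing.
template <typename Alloc>
struct IsNoOpDestroy : std::false_type {};

struct ArenaAlloc {}; // hypothetical arena-style allocator
template <>
struct IsNoOpDestroy<ArenaAlloc> : std::true_type {};

template <typename Alloc, typename = void>
class Recycler; // primary template is only declared

// Chosen when per-node destruction matters: nodes must be queued and freed.
template <typename Alloc>
class Recycler<Alloc,
               typename std::enable_if<!IsNoOpDestroy<Alloc>::value>::type> {
 public:
  void add(void* /* node */) { std::cout << "queued for reclamation\n"; }
};

// Chosen for arena-style allocators: recycling is a no-op, saving space.
template <typename Alloc>
class Recycler<Alloc,
               typename std::enable_if<IsNoOpDestroy<Alloc>::value>::type> {
 public:
  void add(void* /* node */) {} // memory is released with the arena
};

int main() {
  Recycler<std::allocator<int>> tracking;
  Recycler<ArenaAlloc> noop;
  tracking.add(nullptr); // prints; the arena variant does nothing
  noop.add(nullptr);
}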
ios/Pods/Folly/folly/ConcurrentSkipList.h (generated, 362 lines changed)
@ -1,5 +1,5 @@
/*
* Copyright 2016 Facebook, Inc.
* Copyright 2011-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -124,27 +124,30 @@ Sample usage:
#include <limits>
#include <memory>
#include <type_traits>

#include <boost/iterator/iterator_facade.hpp>
#include <glog/logging.h>

#include <folly/ConcurrentSkipList-inl.h>
#include <folly/Likely.h>
#include <folly/Memory.h>
#include <folly/MicroSpinLock.h>
#include <folly/synchronization/MicroSpinLock.h>

namespace folly {

template<typename T,
typename Comp = std::less<T>,
// All nodes are allocated using provided SimpleAllocator,
// it should be thread-safe.
typename NodeAlloc = SysAlloc,
int MAX_HEIGHT = 24>
template <
typename T,
typename Comp = std::less<T>,
// All nodes are allocated using provided SysAllocator,
// it should be thread-safe.
typename NodeAlloc = SysAllocator<void>,
int MAX_HEIGHT = 24>
class ConcurrentSkipList {
// MAX_HEIGHT needs to be at least 2 to suppress compiler
// warnings/errors (Werror=uninitialized tiggered due to preds_[1]
// being treated as a scalar in the compiler).
static_assert(MAX_HEIGHT >= 2 && MAX_HEIGHT < 64,
static_assert(
MAX_HEIGHT >= 2 && MAX_HEIGHT < 64,
"MAX_HEIGHT can only be in the range of [2, 64)");
typedef std::unique_lock<folly::MicroSpinLock> ScopedLocker;
typedef ConcurrentSkipList<T, Comp, NodeAlloc, MAX_HEIGHT> SkipListType;
@ -180,8 +183,9 @@ class ConcurrentSkipList {
}

// Create a shared_ptr skiplist object with initial head height.
static std::shared_ptr<SkipListType> createInstance(int height,
const NodeAlloc& alloc) {
static std::shared_ptr<SkipListType> createInstance(
int height,
const NodeAlloc& alloc) {
return std::make_shared<ConcurrentSkipList>(height, alloc);
}

@ -195,11 +199,11 @@ class ConcurrentSkipList {
//===================================================================

~ConcurrentSkipList() {
/* static */ if (NodeType::template destroyIsNoOp<NodeAlloc>()) {
if /* constexpr */ (NodeType::template DestroyIsNoOp<NodeAlloc>::value) {
// Avoid traversing the list if using arena allocator.
return;
}
for (NodeType* current = head_.load(std::memory_order_relaxed); current; ) {
for (NodeType* current = head_.load(std::memory_order_relaxed); current;) {
NodeType* tmp = current->skip(0);
NodeType::destroy(recycler_.alloc(), current);
current = tmp;
@ -207,22 +211,25 @@ class ConcurrentSkipList {
}

private:
static bool greater(const value_type &data, const NodeType *node) {
static bool greater(const value_type& data, const NodeType* node) {
return node && Comp()(node->data(), data);
}

static bool less(const value_type &data, const NodeType *node) {
static bool less(const value_type& data, const NodeType* node) {
return (node == nullptr) || Comp()(data, node->data());
}

static int findInsertionPoint(NodeType *cur, int cur_layer,
const value_type &data,
NodeType *preds[], NodeType *succs[]) {
static int findInsertionPoint(
NodeType* cur,
int cur_layer,
const value_type& data,
NodeType* preds[],
NodeType* succs[]) {
int foundLayer = -1;
NodeType *pred = cur;
NodeType *foundNode = nullptr;
NodeType* pred = cur;
NodeType* foundNode = nullptr;
for (int layer = cur_layer; layer >= 0; --layer) {
NodeType *node = pred->skip(layer);
NodeType* node = pred->skip(layer);
while (greater(data, node)) {
pred = node;
node = node->skip(layer);
@ -235,54 +242,61 @@ class ConcurrentSkipList {

// if found, succs[0..foundLayer] need to point to the cached foundNode,
// as foundNode might be deleted at the same time thus pred->skip() can
// return NULL or another node.
// return nullptr or another node.
succs[layer] = foundNode ? foundNode : node;
}
return foundLayer;
}

size_t size() const { return size_.load(std::memory_order_relaxed); }
size_t size() const {
return size_.load(std::memory_order_relaxed);
}

int height() const {
return head_.load(std::memory_order_consume)->height();
}

int maxLayer() const { return height() - 1; }
int maxLayer() const {
return height() - 1;
}

size_t incrementSize(int delta) {
return size_.fetch_add(delta, std::memory_order_relaxed) + delta;
}

// Returns the node if found, nullptr otherwise.
NodeType* find(const value_type &data) {
NodeType* find(const value_type& data) {
auto ret = findNode(data);
if (ret.second && !ret.first->markedForRemoval()) return ret.first;
if (ret.second && !ret.first->markedForRemoval()) {
return ret.first;
}
return nullptr;
}

// lock all the necessary nodes for changing (adding or removing) the list.
// returns true if all the lock acquried successfully and the related nodes
// are all validate (not in certain pending states), false otherwise.
bool lockNodesForChange(int nodeHeight,
bool lockNodesForChange(
int nodeHeight,
ScopedLocker guards[MAX_HEIGHT],
NodeType *preds[MAX_HEIGHT],
NodeType *succs[MAX_HEIGHT],
bool adding=true) {
NodeType* preds[MAX_HEIGHT],
NodeType* succs[MAX_HEIGHT],
bool adding = true) {
NodeType *pred, *succ, *prevPred = nullptr;
bool valid = true;
for (int layer = 0; valid && layer < nodeHeight; ++layer) {
pred = preds[layer];
DCHECK(pred != nullptr) << "layer=" << layer << " height=" << height()
<< " nodeheight=" << nodeHeight;
<< " nodeheight=" << nodeHeight;
succ = succs[layer];
if (pred != prevPred) {
guards[layer] = pred->acquireGuard();
prevPred = pred;
}
valid = !pred->markedForRemoval() &&
pred->skip(layer) == succ; // check again after locking
pred->skip(layer) == succ; // check again after locking

if (adding) { // when adding a node, the succ shouldn't be going away
if (adding) { // when adding a node, the succ shouldn't be going away
valid = valid && (succ == nullptr || !succ->markedForRemoval());
}
}
@ -296,29 +310,30 @@ class ConcurrentSkipList {
// list with the same key.
// pair.second stores whether the data is added successfully:
// 0 means not added, otherwise reutrns the new size.
template<typename U>
std::pair<NodeType*, size_t> addOrGetData(U &&data) {
template <typename U>
std::pair<NodeType*, size_t> addOrGetData(U&& data) {
NodeType *preds[MAX_HEIGHT], *succs[MAX_HEIGHT];
NodeType *newNode;
NodeType* newNode;
size_t newSize;
while (true) {
int max_layer = 0;
int layer = findInsertionPointGetMaxLayer(data, preds, succs, &max_layer);

if (layer >= 0) {
NodeType *nodeFound = succs[layer];
NodeType* nodeFound = succs[layer];
DCHECK(nodeFound != nullptr);
if (nodeFound->markedForRemoval()) {
continue; // if it's getting deleted retry finding node.
continue; // if it's getting deleted retry finding node.
}
// wait until fully linked.
while (UNLIKELY(!nodeFound->fullyLinked())) {}
while (UNLIKELY(!nodeFound->fullyLinked())) {
}
return std::make_pair(nodeFound, 0);
}

// need to capped at the original height -- the real height may have grown
int nodeHeight = detail::SkipListRandomHeight::instance()->
getHeight(max_layer + 1);
int nodeHeight =
detail::SkipListRandomHeight::instance()->getHeight(max_layer + 1);

ScopedLocker guards[MAX_HEIGHT];
if (!lockNodesForChange(nodeHeight, guards, preds, succs)) {
@ -326,8 +341,8 @@ class ConcurrentSkipList {
}

// locks acquired and all valid, need to modify the links under the locks.
newNode =
NodeType::create(recycler_.alloc(), nodeHeight, std::forward<U>(data));
newNode = NodeType::create(
recycler_.alloc(), nodeHeight, std::forward<U>(data));
for (int k = 0; k < nodeHeight; ++k) {
newNode->setSkip(k, succs[k]);
preds[k]->setSkip(k, newNode);
@ -340,7 +355,7 @@ class ConcurrentSkipList {

int hgt = height();
size_t sizeLimit =
detail::SkipListRandomHeight::instance()->getSizeLimit(hgt);
detail::SkipListRandomHeight::instance()->getSizeLimit(hgt);

if (hgt < MAX_HEIGHT && newSize > sizeLimit) {
growHeight(hgt + 1);
@ -349,12 +364,12 @@ class ConcurrentSkipList {
return std::make_pair(newNode, newSize);
}

bool remove(const value_type &data) {
NodeType *nodeToDelete = nullptr;
bool remove(const value_type& data) {
NodeType* nodeToDelete = nullptr;
ScopedLocker nodeGuard;
bool isMarked = false;
int nodeHeight = 0;
NodeType* preds[MAX_HEIGHT], *succs[MAX_HEIGHT];
NodeType *preds[MAX_HEIGHT], *succs[MAX_HEIGHT];

while (true) {
int max_layer = 0;
@ -367,7 +382,9 @@ class ConcurrentSkipList {
nodeToDelete = succs[layer];
nodeHeight = nodeToDelete->height();
nodeGuard = nodeToDelete->acquireGuard();
if (nodeToDelete->markedForRemoval()) return false;
if (nodeToDelete->markedForRemoval()) {
return false;
}
nodeToDelete->setMarkedForRemoval();
isMarked = true;
}
@ -375,7 +392,7 @@ class ConcurrentSkipList {
// acquire pred locks from bottom layer up
ScopedLocker guards[MAX_HEIGHT];
if (!lockNodesForChange(nodeHeight, guards, preds, succs, false)) {
continue; // this will unlock all the locks
continue; // this will unlock all the locks
}

for (int k = nodeHeight - 1; k >= 0; --k) {
@ -389,37 +406,41 @@ class ConcurrentSkipList {
return true;
}

const value_type *first() const {
const value_type* first() const {
auto node = head_.load(std::memory_order_consume)->skip(0);
return node ? &node->data() : nullptr;
}

const value_type *last() const {
NodeType *pred = head_.load(std::memory_order_consume);
NodeType *node = nullptr;
const value_type* last() const {
NodeType* pred = head_.load(std::memory_order_consume);
NodeType* node = nullptr;
for (int layer = maxLayer(); layer >= 0; --layer) {
do {
node = pred->skip(layer);
if (node) pred = node;
if (node) {
pred = node;
}
} while (node != nullptr);
}
return pred == head_.load(std::memory_order_relaxed)
? nullptr : &pred->data();
return pred == head_.load(std::memory_order_relaxed) ? nullptr
: &pred->data();
}

static bool okToDelete(NodeType *candidate, int layer) {
static bool okToDelete(NodeType* candidate, int layer) {
DCHECK(candidate != nullptr);
return candidate->fullyLinked() &&
candidate->maxLayer() == layer &&
!candidate->markedForRemoval();
return candidate->fullyLinked() && candidate->maxLayer() == layer &&
!candidate->markedForRemoval();
}

// find node for insertion/deleting
int findInsertionPointGetMaxLayer(const value_type &data,
NodeType *preds[], NodeType *succs[], int *max_layer) const {
int findInsertionPointGetMaxLayer(
const value_type& data,
NodeType* preds[],
NodeType* succs[],
int* max_layer) const {
*max_layer = maxLayer();
return findInsertionPoint(head_.load(std::memory_order_consume),
*max_layer, data, preds, succs);
return findInsertionPoint(
head_.load(std::memory_order_consume), *max_layer, data, preds, succs);
}

// Find node for access. Returns a paired values:
@ -427,23 +448,26 @@ class ConcurrentSkipList {
// pair.second = 1 when the data value is founded, or 0 otherwise.
// This is like lower_bound, but not exact: we could have the node marked for
// removal so still need to check that.
std::pair<NodeType*, int> findNode(const value_type &data) const {
std::pair<NodeType*, int> findNode(const value_type& data) const {
return findNodeDownRight(data);
}

// Find node by first stepping down then stepping right. Based on benchmark
// results, this is slightly faster than findNodeRightDown for better
// localality on the skipping pointers.
std::pair<NodeType*, int> findNodeDownRight(const value_type &data) const {
NodeType *pred = head_.load(std::memory_order_consume);
std::pair<NodeType*, int> findNodeDownRight(const value_type& data) const {
NodeType* pred = head_.load(std::memory_order_consume);
int ht = pred->height();
NodeType *node = nullptr;
NodeType* node = nullptr;

bool found = false;
while (!found) {
// stepping down
for (; ht > 0 && less(data, node = pred->skip(ht - 1)); --ht) {}
if (ht == 0) return std::make_pair(node, 0); // not found
for (; ht > 0 && less(data, node = pred->skip(ht - 1)); --ht) {
}
if (ht == 0) {
return std::make_pair(node, 0); // not found
}
// node <= data now, but we need to fix up ht
--ht;

@ -459,9 +483,9 @@ class ConcurrentSkipList {

// find node by first stepping right then stepping down.
// We still keep this for reference purposes.
std::pair<NodeType*, int> findNodeRightDown(const value_type &data) const {
NodeType *pred = head_.load(std::memory_order_consume);
NodeType *node = nullptr;
std::pair<NodeType*, int> findNodeRightDown(const value_type& data) const {
NodeType* pred = head_.load(std::memory_order_consume);
NodeType* node = nullptr;
auto top = maxLayer();
int found = 0;
for (int layer = top; !found && layer >= 0; --layer) {
@ -475,7 +499,7 @@ class ConcurrentSkipList {
return std::make_pair(node, found);
}

NodeType* lower_bound(const value_type &data) const {
NodeType* lower_bound(const value_type& data) const {
auto node = findNode(data).first;
while (node != nullptr && node->markedForRemoval()) {
node = node->skip(0);
@ -485,20 +509,20 @@ class ConcurrentSkipList {

void growHeight(int height) {
NodeType* oldHead = head_.load(std::memory_order_consume);
if (oldHead->height() >= height) { // someone else already did this
if (oldHead->height() >= height) { // someone else already did this
return;
}

NodeType* newHead =
NodeType::create(recycler_.alloc(), height, value_type(), true);
NodeType::create(recycler_.alloc(), height, value_type(), true);

{ // need to guard the head node in case others are adding/removing
// nodes linked to the head.
ScopedLocker g = oldHead->acquireGuard();
newHead->copyHead(oldHead);
NodeType* expected = oldHead;
if (!head_.compare_exchange_strong(expected, newHead,
std::memory_order_release)) {
if (!head_.compare_exchange_strong(
expected, newHead, std::memory_order_release)) {
// if someone has already done the swap, just return.
NodeType::destroy(recycler_.alloc(), newHead);
return;
@ -508,7 +532,7 @@ class ConcurrentSkipList {
recycle(oldHead);
}

void recycle(NodeType *node) {
void recycle(NodeType* node) {
recycler_.add(node);
}

@ -517,10 +541,11 @@ class ConcurrentSkipList {
std::atomic<size_t> size_;
};

template<typename T, typename Comp, typename NodeAlloc, int MAX_HEIGHT>
template <typename T, typename Comp, typename NodeAlloc, int MAX_HEIGHT>
class ConcurrentSkipList<T, Comp, NodeAlloc, MAX_HEIGHT>::Accessor {
typedef detail::SkipListNode<T> NodeType;
typedef ConcurrentSkipList<T, Comp, NodeAlloc, MAX_HEIGHT> SkipListType;

public:
typedef T value_type;
typedef T key_type;
@ -537,8 +562,7 @@ class ConcurrentSkipList<T, Comp, NodeAlloc, MAX_HEIGHT>::Accessor {
typedef typename SkipListType::Skipper Skipper;

explicit Accessor(std::shared_ptr<ConcurrentSkipList> skip_list)
: slHolder_(std::move(skip_list))
{
: slHolder_(std::move(skip_list)) {
sl_ = slHolder_.get();
DCHECK(sl_ != nullptr);
sl_->recycler_.addRef();
@ -546,18 +570,17 @@ class ConcurrentSkipList<T, Comp, NodeAlloc, MAX_HEIGHT>::Accessor {

// Unsafe initializer: the caller assumes the responsibility to keep
// skip_list valid during the whole life cycle of the Acessor.
explicit Accessor(ConcurrentSkipList *skip_list) : sl_(skip_list) {
explicit Accessor(ConcurrentSkipList* skip_list) : sl_(skip_list) {
DCHECK(sl_ != nullptr);
sl_->recycler_.addRef();
}

Accessor(const Accessor &accessor) :
sl_(accessor.sl_),
slHolder_(accessor.slHolder_) {
Accessor(const Accessor& accessor)
: sl_(accessor.sl_), slHolder_(accessor.slHolder_) {
sl_->recycler_.addRef();
}

Accessor& operator=(const Accessor &accessor) {
Accessor& operator=(const Accessor& accessor) {
if (this != &accessor) {
slHolder_ = accessor.slHolder_;
sl_->recycler_.releaseRef();
@ -571,40 +594,62 @@ class ConcurrentSkipList<T, Comp, NodeAlloc, MAX_HEIGHT>::Accessor {
sl_->recycler_.releaseRef();
}

bool empty() const { return sl_->size() == 0; }
size_t size() const { return sl_->size(); }
size_type max_size() const { return std::numeric_limits<size_type>::max(); }
bool empty() const {
return sl_->size() == 0;
}
size_t size() const {
return sl_->size();
}
size_type max_size() const {
return std::numeric_limits<size_type>::max();
}

// returns end() if the value is not in the list, otherwise returns an
// iterator pointing to the data, and it's guaranteed that the data is valid
// as far as the Accessor is hold.
iterator find(const key_type &value) { return iterator(sl_->find(value)); }
const_iterator find(const key_type &value) const {
iterator find(const key_type& value) {
return iterator(sl_->find(value));
}
size_type count(const key_type &data) const { return contains(data); }
const_iterator find(const key_type& value) const {
return iterator(sl_->find(value));
}
size_type count(const key_type& data) const {
return contains(data);
}

iterator begin() const {
NodeType* head = sl_->head_.load(std::memory_order_consume);
return iterator(head->next());
}
iterator end() const { return iterator(nullptr); }
const_iterator cbegin() const { return begin(); }
const_iterator cend() const { return end(); }
iterator end() const {
return iterator(nullptr);
}
const_iterator cbegin() const {
return begin();
}
const_iterator cend() const {
return end();
}

template<typename U,
typename=typename std::enable_if<std::is_convertible<U, T>::value>::type>
template <
typename U,
typename =
typename std::enable_if<std::is_convertible<U, T>::value>::type>
std::pair<iterator, bool> insert(U&& data) {
auto ret = sl_->addOrGetData(std::forward<U>(data));
return std::make_pair(iterator(ret.first), ret.second);
}
size_t erase(const key_type &data) { return remove(data); }
size_t erase(const key_type& data) {
return remove(data);
}

iterator lower_bound(const key_type &data) const {
iterator lower_bound(const key_type& data) const {
return iterator(sl_->lower_bound(data));
}

size_t height() const { return sl_->height(); }
size_t height() const {
return sl_->height();
}

// first() returns pointer to the first element in the skiplist, or
// nullptr if empty.
@ -617,8 +662,12 @@ class ConcurrentSkipList<T, Comp, NodeAlloc, MAX_HEIGHT>::Accessor {
// last() is not guaranteed to be the max_element(), and both of them can
// be invalid (i.e. nullptr), so we name them differently from front() and
// tail() here.
const key_type *first() const { return sl_->first(); }
const key_type *last() const { return sl_->last(); }
const key_type* first() const {
return sl_->first();
}
const key_type* last() const {
return sl_->last();
}

// Try to remove the last element in the skip list.
//
@ -630,31 +679,40 @@ class ConcurrentSkipList<T, Comp, NodeAlloc, MAX_HEIGHT>::Accessor {
return last ? sl_->remove(*last) : false;
}

std::pair<key_type*, bool> addOrGetData(const key_type &data) {
std::pair<key_type*, bool> addOrGetData(const key_type& data) {
auto ret = sl_->addOrGetData(data);
return std::make_pair(&ret.first->data(), ret.second);
}

SkipListType* skiplist() const { return sl_; }
SkipListType* skiplist() const {
return sl_;
}

// legacy interfaces
// TODO:(xliu) remove these.
// Returns true if the node is added successfully, false if not, i.e. the
// node with the same key already existed in the list.
bool contains(const key_type &data) const { return sl_->find(data); }
bool add(const key_type &data) { return sl_->addOrGetData(data).second; }
bool remove(const key_type &data) { return sl_->remove(data); }
bool contains(const key_type& data) const {
return sl_->find(data);
}
bool add(const key_type& data) {
return sl_->addOrGetData(data).second;
}
bool remove(const key_type& data) {
return sl_->remove(data);
}

private:
SkipListType *sl_;
SkipListType* sl_;
std::shared_ptr<SkipListType> slHolder_;
};

// implements forward iterator concept.
template<typename ValT, typename NodeT>
class detail::csl_iterator :
public boost::iterator_facade<csl_iterator<ValT, NodeT>,
ValT, boost::forward_traversal_tag> {
template <typename ValT, typename NodeT>
class detail::csl_iterator : public boost::iterator_facade<
csl_iterator<ValT, NodeT>,
ValT,
boost::forward_traversal_tag> {
public:
typedef ValT value_type;
typedef value_type& reference;
@ -663,44 +721,54 @@ class detail::csl_iterator :

explicit csl_iterator(NodeT* node = nullptr) : node_(node) {}

template<typename OtherVal, typename OtherNode>
csl_iterator(const csl_iterator<OtherVal, OtherNode> &other,
typename std::enable_if<std::is_convertible<OtherVal, ValT>::value>::type*
= 0) : node_(other.node_) {}
template <typename OtherVal, typename OtherNode>
csl_iterator(
const csl_iterator<OtherVal, OtherNode>& other,
typename std::enable_if<
std::is_convertible<OtherVal, ValT>::value>::type* = nullptr)
: node_(other.node_) {}

size_t nodeSize() const {
return node_ == nullptr ? 0 :
node_->height() * sizeof(NodeT*) + sizeof(*this);
return node_ == nullptr ? 0
: node_->height() * sizeof(NodeT*) + sizeof(*this);
}

bool good() const { return node_ != nullptr; }
bool good() const {
return node_ != nullptr;
}

private:
friend class boost::iterator_core_access;
template<class,class> friend class csl_iterator;
template <class, class>
friend class csl_iterator;

void increment() { node_ = node_->next(); };
bool equal(const csl_iterator& other) const { return node_ == other.node_; }
value_type& dereference() const { return node_->data(); }
void increment() {
node_ = node_->next();
}
bool equal(const csl_iterator& other) const {
return node_ == other.node_;
}
value_type& dereference() const {
return node_->data();
}

NodeT* node_;
};

// Skipper interface
template<typename T, typename Comp, typename NodeAlloc, int MAX_HEIGHT>
template <typename T, typename Comp, typename NodeAlloc, int MAX_HEIGHT>
class ConcurrentSkipList<T, Comp, NodeAlloc, MAX_HEIGHT>::Skipper {
typedef detail::SkipListNode<T> NodeType;
typedef ConcurrentSkipList<T, Comp, NodeAlloc, MAX_HEIGHT> SkipListType;
typedef typename SkipListType::Accessor Accessor;

public:
typedef T value_type;
typedef T value_type;
typedef T& reference;
typedef T* pointer;
typedef ptrdiff_t difference_type;

Skipper(const std::shared_ptr<SkipListType>& skipList) :
accessor_(skipList) {
Skipper(const std::shared_ptr<SkipListType>& skipList) : accessor_(skipList) {
init();
}

@ -718,13 +786,13 @@ class ConcurrentSkipList<T, Comp, NodeAlloc, MAX_HEIGHT>::Skipper {
}
int max_layer = maxLayer();
for (int i = 0; i < max_layer; ++i) {
hints_[i] = i + 1;
hints_[i] = uint8_t(i + 1);
}
hints_[max_layer] = max_layer;
}

// advance to the next node in the list.
Skipper& operator ++() {
Skipper& operator++() {
preds_[0] = succs_[0];
succs_[0] = preds_[0]->skip(0);
int height = curHeight();
@ -735,9 +803,13 @@ class ConcurrentSkipList<T, Comp, NodeAlloc, MAX_HEIGHT>::Skipper {
return *this;
}

bool good() const { return succs_[0] != nullptr; }
bool good() const {
return succs_[0] != nullptr;
}

int maxLayer() const { return headHeight_ - 1; }
int maxLayer() const {
return headHeight_ - 1;
}

int curHeight() const {
// need to cap the height to the cached head height, as the current node
@ -746,17 +818,17 @@ class ConcurrentSkipList<T, Comp, NodeAlloc, MAX_HEIGHT>::Skipper {
return succs_[0] ? std::min(headHeight_, succs_[0]->height()) : 0;
}

const value_type &data() const {
const value_type& data() const {
DCHECK(succs_[0] != nullptr);
return succs_[0]->data();
}

value_type &operator *() const {
value_type& operator*() const {
DCHECK(succs_[0] != nullptr);
return succs_[0]->data();
}

value_type *operator->() {
value_type* operator->() {
DCHECK(succs_[0] != nullptr);
return &succs_[0]->data();
}
@ -767,23 +839,27 @@ class ConcurrentSkipList<T, Comp, NodeAlloc, MAX_HEIGHT>::Skipper {
*
* Returns true if the data is found, false otherwise.
*/
bool to(const value_type &data) {
bool to(const value_type& data) {
int layer = curHeight() - 1;
if (layer < 0) return false; // reaches the end of the list
if (layer < 0) {
return false; // reaches the end of the list
}

int lyr = hints_[layer];
int max_layer = maxLayer();
while (SkipListType::greater(data, succs_[lyr]) && lyr < max_layer) {
++lyr;
}
hints_[layer] = lyr; // update the hint
hints_[layer] = lyr; // update the hint

int foundLayer = SkipListType::
findInsertionPoint(preds_[lyr], lyr, data, preds_, succs_);
if (foundLayer < 0) return false;
int foundLayer = SkipListType::findInsertionPoint(
preds_[lyr], lyr, data, preds_, succs_);
if (foundLayer < 0) {
return false;
}

DCHECK(succs_[0] != nullptr) << "lyr=" << lyr
<< "; max_layer=" << max_layer;
DCHECK(succs_[0] != nullptr)
<< "lyr=" << lyr << "; max_layer=" << max_layer;
return !succs_[0]->markedForRemoval();
}
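The Accessor and Skipper interfaces reformatted above are the public entry points to this container. For orientation, a minimal usage sketch in the spirit of the header's own sample-usage comment; the element type and the initial head height are arbitrary choices here, and the single-argument createInstance overload is assumed from the Folly API rather than shown in this hunk:

#include <folly/ConcurrentSkipList.h>
#include <memory>

int main() {
  using SkipList = folly::ConcurrentSkipList<int>;
  // Head height 10 is an arbitrary starting point; the list grows it as needed.
  std::shared_ptr<SkipList> sl(SkipList::createInstance(10));

  // An Accessor pins the list's nodes for as long as it is held.
  SkipList::Accessor accessor(sl);
  accessor.add(3);
  accessor.add(1);
  bool found = accessor.contains(3); // expected: true
  accessor.remove(1);
  return found ? 0 : 1;
}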
ios/Pods/Folly/folly/ConstexprMath.h (generated, new file, 421 lines)
@ -0,0 +1,421 @@
/*
* Copyright 2017-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#pragma once

#include <cstdint>
#include <limits>
#include <type_traits>

namespace folly {

// TODO: Replace with std::equal_to, etc., after upgrading to C++14.
template <typename T>
struct constexpr_equal_to {
constexpr bool operator()(T const& a, T const& b) const {
return a == b;
}
};
template <typename T>
struct constexpr_not_equal_to {
constexpr bool operator()(T const& a, T const& b) const {
return a != b;
}
};
template <typename T>
struct constexpr_less {
constexpr bool operator()(T const& a, T const& b) const {
return a < b;
}
};
template <typename T>
struct constexpr_less_equal {
constexpr bool operator()(T const& a, T const& b) const {
return a <= b;
}
};
template <typename T>
struct constexpr_greater {
constexpr bool operator()(T const& a, T const& b) const {
return a > b;
}
};
template <typename T>
struct constexpr_greater_equal {
constexpr bool operator()(T const& a, T const& b) const {
return a >= b;
}
};

// TLDR: Prefer using operator< for ordering. And when
// a and b are equivalent objects, we return b to make
// sorting stable.
// See http://stepanovpapers.com/notes.pdf for details.
template <typename T>
constexpr T constexpr_max(T a) {
return a;
}
template <typename T, typename... Ts>
constexpr T constexpr_max(T a, T b, Ts... ts) {
return b < a ? constexpr_max(a, ts...) : constexpr_max(b, ts...);
}

// When a and b are equivalent objects, we return a to
// make sorting stable.
template <typename T>
constexpr T constexpr_min(T a) {
return a;
}
template <typename T, typename... Ts>
constexpr T constexpr_min(T a, T b, Ts... ts) {
return b < a ? constexpr_min(b, ts...) : constexpr_min(a, ts...);
}

template <typename T, typename Less>
constexpr T const&
constexpr_clamp(T const& v, T const& lo, T const& hi, Less less) {
return less(v, lo) ? lo : less(hi, v) ? hi : v;
}
template <typename T>
constexpr T const& constexpr_clamp(T const& v, T const& lo, T const& hi) {
return constexpr_clamp(v, lo, hi, constexpr_less<T>{});
}

namespace detail {

template <typename T, typename = void>
struct constexpr_abs_helper {};

template <typename T>
struct constexpr_abs_helper<
T,
typename std::enable_if<std::is_floating_point<T>::value>::type> {
static constexpr T go(T t) {
return t < static_cast<T>(0) ? -t : t;
}
};

template <typename T>
struct constexpr_abs_helper<
T,
typename std::enable_if<
std::is_integral<T>::value && !std::is_same<T, bool>::value &&
std::is_unsigned<T>::value>::type> {
static constexpr T go(T t) {
return t;
}
};

template <typename T>
struct constexpr_abs_helper<
T,
typename std::enable_if<
std::is_integral<T>::value && !std::is_same<T, bool>::value &&
std::is_signed<T>::value>::type> {
static constexpr typename std::make_unsigned<T>::type go(T t) {
return typename std::make_unsigned<T>::type(t < static_cast<T>(0) ? -t : t);
}
};
} // namespace detail

template <typename T>
constexpr auto constexpr_abs(T t)
-> decltype(detail::constexpr_abs_helper<T>::go(t)) {
return detail::constexpr_abs_helper<T>::go(t);
}

namespace detail {
template <typename T>
constexpr T constexpr_log2_(T a, T e) {
return e == T(1) ? a : constexpr_log2_(a + T(1), e / T(2));
}

template <typename T>
constexpr T constexpr_log2_ceil_(T l2, T t) {
return l2 + T(T(1) << l2 < t ? 1 : 0);
}

template <typename T>
constexpr T constexpr_square_(T t) {
return t * t;
}
} // namespace detail

template <typename T>
constexpr T constexpr_log2(T t) {
return detail::constexpr_log2_(T(0), t);
}

template <typename T>
constexpr T constexpr_log2_ceil(T t) {
return detail::constexpr_log2_ceil_(constexpr_log2(t), t);
}

template <typename T>
constexpr T constexpr_ceil(T t, T round) {
return round == T(0)
? t
: ((t + (t < T(0) ? T(0) : round - T(1))) / round) * round;
}

template <typename T>
constexpr T constexpr_pow(T base, std::size_t exp) {
return exp == 0
? T(1)
: exp == 1 ? base
: detail::constexpr_square_(constexpr_pow(base, exp / 2)) *
(exp % 2 ? base : T(1));
}

/// constexpr_find_last_set
///
/// Return the 1-based index of the most significant bit which is set.
/// For x > 0, constexpr_find_last_set(x) == 1 + floor(log2(x)).
template <typename T>
constexpr std::size_t constexpr_find_last_set(T const t) {
using U = std::make_unsigned_t<T>;
return t == T(0) ? 0 : 1 + constexpr_log2(static_cast<U>(t));
}

namespace detail {
template <typename U>
constexpr std::size_t
constexpr_find_first_set_(std::size_t s, std::size_t a, U const u) {
return s == 0 ? a
: constexpr_find_first_set_(
s / 2, a + s * bool((u >> a) % (U(1) << s) == U(0)), u);
}
} // namespace detail

/// constexpr_find_first_set
///
/// Return the 1-based index of the least significant bit which is set.
/// For x > 0, the exponent in the largest power of two which does not divide x.
template <typename T>
constexpr std::size_t constexpr_find_first_set(T t) {
using U = std::make_unsigned_t<T>;
using size = std::integral_constant<std::size_t, sizeof(T) * 4>;
return t == T(0)
? 0
: 1 + detail::constexpr_find_first_set_(size{}, 0, static_cast<U>(t));
}

template <typename T>
constexpr T constexpr_add_overflow_clamped(T a, T b) {
using L = std::numeric_limits<T>;
using M = std::intmax_t;
static_assert(
!std::is_integral<T>::value || sizeof(T) <= sizeof(M),
"Integral type too large!");
// clang-format off
return
// don't do anything special for non-integral types.
!std::is_integral<T>::value ? a + b :
// for narrow integral types, just convert to intmax_t.
sizeof(T) < sizeof(M)
? T(constexpr_clamp(M(a) + M(b), M(L::min()), M(L::max()))) :
// when a >= 0, cannot add more than `MAX - a` onto a.
!(a < 0) ? a + constexpr_min(b, T(L::max() - a)) :
// a < 0 && b >= 0, `a + b` will always be in valid range of type T.
!(b < 0) ? a + b :
// a < 0 && b < 0, keep the result >= MIN.
a + constexpr_max(b, T(L::min() - a));
// clang-format on
}

template <typename T>
constexpr T constexpr_sub_overflow_clamped(T a, T b) {
using L = std::numeric_limits<T>;
using M = std::intmax_t;
static_assert(
!std::is_integral<T>::value || sizeof(T) <= sizeof(M),
"Integral type too large!");
// clang-format off
return
// don't do anything special for non-integral types.
!std::is_integral<T>::value ? a - b :
// for unsigned type, keep result >= 0.
std::is_unsigned<T>::value ? (a < b ? 0 : a - b) :
// for narrow signed integral types, just convert to intmax_t.
sizeof(T) < sizeof(M)
? T(constexpr_clamp(M(a) - M(b), M(L::min()), M(L::max()))) :
// (a >= 0 && b >= 0) || (a < 0 && b < 0), `a - b` will always be valid.
(a < 0) == (b < 0) ? a - b :
// MIN < b, so `-b` should be in valid range (-MAX <= -b <= MAX),
// convert subtraction to addition.
L::min() < b ? constexpr_add_overflow_clamped(a, T(-b)) :
// -b = -MIN = (MAX + 1) and a <= -1, result is in valid range.
a < 0 ? a - b :
// -b = -MIN = (MAX + 1) and a >= 0, result > MAX.
L::max();
// clang-format on
}

// clamp_cast<> provides sane numeric conversions from float point numbers to
// integral numbers, and between different types of integral numbers. It helps
// to avoid unexpected bugs introduced by bad conversion, and undefined behavior
// like overflow when casting float point numbers to integral numbers.
//
// When doing clamp_cast<Dst>(value), if `value` is in valid range of Dst,
// it will give correct result in Dst, equal to `value`.
//
// If `value` is outside the representable range of Dst, it will be clamped to
// MAX or MIN in Dst, instead of being undefined behavior.
//
// Float NaNs are converted to 0 in integral type.
//
// Here's some comparision with static_cast<>:
// (with FB-internal gcc-5-glibc-2.23 toolchain)
//
// static_cast<int32_t>(NaN) = 6
// clamp_cast<int32_t>(NaN) = 0
//
// static_cast<int32_t>(9999999999.0f) = -348639895
// clamp_cast<int32_t>(9999999999.0f) = 2147483647
//
// static_cast<int32_t>(2147483647.0f) = -348639895
// clamp_cast<int32_t>(2147483647.0f) = 2147483647
//
// static_cast<uint32_t>(4294967295.0f) = 0
// clamp_cast<uint32_t>(4294967295.0f) = 4294967295
//
// static_cast<uint32_t>(-1) = 4294967295
// clamp_cast<uint32_t>(-1) = 0
//
// static_cast<int16_t>(32768u) = -32768
// clamp_cast<int16_t>(32768u) = 32767

template <typename Dst, typename Src>
constexpr typename std::enable_if<std::is_integral<Src>::value, Dst>::type
constexpr_clamp_cast(Src src) {
static_assert(
std::is_integral<Dst>::value && sizeof(Dst) <= sizeof(int64_t),
"constexpr_clamp_cast can only cast into integral type (up to 64bit)");

using L = std::numeric_limits<Dst>;
// clang-format off
return
// Check if Src and Dst have same signedness.
std::is_signed<Src>::value == std::is_signed<Dst>::value
? (
// Src and Dst have same signedness. If sizeof(Src) <= sizeof(Dst),
// we can safely convert Src to Dst without any loss of accuracy.
sizeof(Src) <= sizeof(Dst) ? Dst(src) :
// If Src is larger in size, we need to clamp it to valid range in Dst.
Dst(constexpr_clamp(src, Src(L::min()), Src(L::max()))))
// Src and Dst have different signedness.
// Check if it's signed -> unsigend cast.
: std::is_signed<Src>::value && std::is_unsigned<Dst>::value
? (
// If src < 0, the result should be 0.
src < 0 ? Dst(0) :
// Otherwise, src >= 0. If src can fit into Dst, we can safely cast it
// without loss of accuracy.
sizeof(Src) <= sizeof(Dst) ? Dst(src) :
// If Src is larger in size than Dst, we need to ensure the result is
// at most Dst MAX.
Dst(constexpr_min(src, Src(L::max()))))
// It's unsigned -> signed cast.
: (
// Since Src is unsigned, and Dst is signed, Src can fit into Dst only
// when sizeof(Src) < sizeof(Dst).
sizeof(Src) < sizeof(Dst) ? Dst(src) :
// If Src does not fit into Dst, we need to ensure the result is at most
// Dst MAX.
Dst(constexpr_min(src, Src(L::max()))));
// clang-format on
}

namespace detail {
// Upper/lower bound values that could be accurately represented in both
// integral and float point types.
constexpr double kClampCastLowerBoundDoubleToInt64F = -9223372036854774784.0;
constexpr double kClampCastUpperBoundDoubleToInt64F = 9223372036854774784.0;
constexpr double kClampCastUpperBoundDoubleToUInt64F = 18446744073709549568.0;

constexpr float kClampCastLowerBoundFloatToInt32F = -2147483520.0f;
constexpr float kClampCastUpperBoundFloatToInt32F = 2147483520.0f;
constexpr float kClampCastUpperBoundFloatToUInt32F = 4294967040.0f;

// This works the same as constexpr_clamp, but the comparision are done in Src
// to prevent any implicit promotions.
template <typename D, typename S>
constexpr D constexpr_clamp_cast_helper(S src, S sl, S su, D dl, D du) {
return src < sl ? dl : (src > su ? du : D(src));
}
} // namespace detail

template <typename Dst, typename Src>
constexpr typename std::enable_if<std::is_floating_point<Src>::value, Dst>::type
constexpr_clamp_cast(Src src) {
static_assert(
std::is_integral<Dst>::value && sizeof(Dst) <= sizeof(int64_t),
"constexpr_clamp_cast can only cast into integral type (up to 64bit)");

using L = std::numeric_limits<Dst>;
// clang-format off
return
// Special case: cast NaN into 0.
// Using a trick here to portably check for NaN: f != f only if f is NaN.
// see: https://stackoverflow.com/a/570694
(src != src) ? Dst(0) :
// using `sizeof(Src) > sizeof(Dst)` as a heuristic that Dst can be
// represented in Src without loss of accuracy.
// see: https://en.wikipedia.org/wiki/Floating-point_arithmetic
sizeof(Src) > sizeof(Dst) ?
detail::constexpr_clamp_cast_helper(
src, Src(L::min()), Src(L::max()), L::min(), L::max()) :
// sizeof(Src) < sizeof(Dst) only happens when doing cast of
// 32bit float -> u/int64_t.
// Losslessly promote float into double, change into double -> u/int64_t.
sizeof(Src) < sizeof(Dst) ? (
src >= 0.0
? constexpr_clamp_cast<Dst>(
constexpr_clamp_cast<std::uint64_t>(double(src)))
: constexpr_clamp_cast<Dst>(
constexpr_clamp_cast<std::int64_t>(double(src)))) :
// The following are for sizeof(Src) == sizeof(Dst).
std::is_same<Src, double>::value && std::is_same<Dst, int64_t>::value ?
detail::constexpr_clamp_cast_helper(
double(src),
detail::kClampCastLowerBoundDoubleToInt64F,
detail::kClampCastUpperBoundDoubleToInt64F,
L::min(),
L::max()) :
std::is_same<Src, double>::value && std::is_same<Dst, uint64_t>::value ?
detail::constexpr_clamp_cast_helper(
double(src),
0.0,
detail::kClampCastUpperBoundDoubleToUInt64F,
L::min(),
L::max()) :
std::is_same<Src, float>::value && std::is_same<Dst, int32_t>::value ?
detail::constexpr_clamp_cast_helper(
float(src),
detail::kClampCastLowerBoundFloatToInt32F,
detail::kClampCastUpperBoundFloatToInt32F,
L::min(),
L::max()) :
detail::constexpr_clamp_cast_helper(
float(src),
0.0f,
detail::kClampCastUpperBoundFloatToUInt32F,
L::min(),
L::max());
// clang-format on
}

} // namespace folly
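Because everything in this new header is constexpr, the clamp_cast behavior tabulated in its comments can be checked at compile time. A small sketch exercising the functions added above; the specific values come straight from that comparison table:

#include <cstdint>
#include <limits>

#include <folly/ConstexprMath.h>

// Values from the comparison table in ConstexprMath.h, verified statically.
static_assert(
    folly::constexpr_clamp_cast<std::int32_t>(9999999999.0f) ==
        std::numeric_limits<std::int32_t>::max(),
    "out-of-range float clamps to INT32_MAX instead of overflowing");
static_assert(
    folly::constexpr_clamp_cast<std::uint32_t>(-1) == 0u,
    "negative source clamps to 0 for unsigned destinations");
static_assert(
    folly::constexpr_clamp_cast<std::int16_t>(32768u) == 32767,
    "a value one past INT16_MAX clamps to INT16_MAX");
static_assert(
    folly::constexpr_add_overflow_clamped<std::int8_t>(100, 100) == 127,
    "saturating add clamps at the type's maximum");

int main() { return 0; }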
ios/Pods/Folly/folly/ContainerTraits.h (generated, 42 lines removed)
@ -1,42 +0,0 @@
/*
* Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#pragma once

#include <folly/Traits.h>

namespace folly {

FOLLY_CREATE_HAS_MEMBER_FN_TRAITS(container_emplace_back_traits, emplace_back);

template <class Container, typename... Args>
inline
typename std::enable_if<
container_emplace_back_traits<Container, void(Args...)>::value>::type
container_emplace_back_or_push_back(Container& container, Args&&... args) {
container.emplace_back(std::forward<Args>(args)...);
}

template <class Container, typename... Args>
inline
typename std::enable_if<
!container_emplace_back_traits<Container, void(Args...)>::value>::type
container_emplace_back_or_push_back(Container& container, Args&&... args) {
using v = typename Container::value_type;
container.push_back(v(std::forward<Args>(args)...));
}

}
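The deleted helper above called emplace_back when the container had one and fell back to push_back otherwise. The same dispatch can be written without the FOLLY_CREATE_HAS_MEMBER_FN_TRAITS macro using expression SFINAE; a standalone sketch, not part of Folly:

#include <utility>
#include <vector>

namespace {

// Preferred overload: selected when C has emplace_back (expression SFINAE).
template <typename C, typename... Args>
auto emplace_or_push_impl(C& c, int, Args&&... args)
    -> decltype(c.emplace_back(std::forward<Args>(args)...), void()) {
  c.emplace_back(std::forward<Args>(args)...);
}

// Fallback: construct the value_type explicitly and push_back it.
template <typename C, typename... Args>
void emplace_or_push_impl(C& c, long, Args&&... args) {
  c.push_back(typename C::value_type(std::forward<Args>(args)...));
}

template <typename C, typename... Args>
void emplace_or_push(C& c, Args&&... args) {
  // Literal 0 is int, so the emplace_back overload wins when it compiles.
  emplace_or_push_impl(c, 0, std::forward<Args>(args)...);
}

} // namespace

int main() {
  std::vector<std::pair<int, int>> v;
  emplace_or_push(v, 1, 2); // uses emplace_back
  return v.size() == 1 ? 0 : 1;
}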
ios/Pods/Folly/folly/Conv.cpp (generated, 397 lines changed)
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2016 Facebook, Inc.
|
||||
* Copyright 2011-present Facebook, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@ -50,28 +50,34 @@ struct MaxString {
|
||||
static const char* const value;
|
||||
};
|
||||
|
||||
template <> const char *const MaxString<uint8_t>::value = "255";
|
||||
template <> const char *const MaxString<uint16_t>::value = "65535";
|
||||
template <> const char *const MaxString<uint32_t>::value = "4294967295";
|
||||
template <>
|
||||
const char* const MaxString<uint8_t>::value = "255";
|
||||
template <>
|
||||
const char* const MaxString<uint16_t>::value = "65535";
|
||||
template <>
|
||||
const char* const MaxString<uint32_t>::value = "4294967295";
|
||||
#if __SIZEOF_LONG__ == 4
|
||||
template <> const char *const MaxString<unsigned long>::value =
|
||||
"4294967295";
|
||||
template <>
|
||||
const char* const MaxString<unsigned long>::value = "4294967295";
|
||||
#else
|
||||
template <> const char *const MaxString<unsigned long>::value =
|
||||
"18446744073709551615";
|
||||
template <>
|
||||
const char* const MaxString<unsigned long>::value = "18446744073709551615";
|
||||
#endif
|
||||
static_assert(sizeof(unsigned long) >= 4,
|
||||
"Wrong value for MaxString<unsigned long>::value,"
|
||||
" please update.");
|
||||
template <> const char *const MaxString<unsigned long long>::value =
|
||||
"18446744073709551615";
|
||||
static_assert(sizeof(unsigned long long) >= 8,
|
||||
"Wrong value for MaxString<unsigned long long>::value"
|
||||
", please update.");
|
||||
static_assert(
|
||||
sizeof(unsigned long) >= 4,
|
||||
"Wrong value for MaxString<unsigned long>::value,"
|
||||
" please update.");
|
||||
template <>
|
||||
const char* const MaxString<unsigned long long>::value = "18446744073709551615";
|
||||
static_assert(
|
||||
sizeof(unsigned long long) >= 8,
|
||||
"Wrong value for MaxString<unsigned long long>::value"
|
||||
", please update.");
|
||||
|
||||
#if FOLLY_HAVE_INT128_T
|
||||
template <> const char *const MaxString<__uint128_t>::value =
|
||||
"340282366920938463463374607431768211455";
|
||||
template <>
|
||||
const char* const MaxString<__uint128_t>::value =
|
||||
"340282366920938463463374607431768211455";
|
||||
#endif
|
||||
|
||||
/*
|
||||
@ -86,120 +92,120 @@ template <> const char *const MaxString<__uint128_t>::value =
|
||||
// still not overflow uint16_t.
|
||||
constexpr int32_t OOR = 10000;
|
||||
|
||||
FOLLY_ALIGNED(16) constexpr uint16_t shift1[] = {
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 0-9
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 10
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 20
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 30
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, 0, // 40
|
||||
1, 2, 3, 4, 5, 6, 7, 8, 9, OOR, OOR,
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 60
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 70
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 80
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 90
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 100
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 110
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 120
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 130
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 140
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 150
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 160
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 170
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 180
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 190
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 200
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 210
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 220
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 230
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 240
|
||||
OOR, OOR, OOR, OOR, OOR, OOR // 250
|
||||
alignas(16) constexpr uint16_t shift1[] = {
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 0-9
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 10
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 20
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 30
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, 0, 1, // 40
|
||||
2, 3, 4, 5, 6, 7, 8, 9, OOR, OOR,
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 60
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 70
|
||||
OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 80
|
||||
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 90
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 100
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 110
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 120
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 130
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 140
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 150
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 160
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 170
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 180
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 190
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 200
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 210
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 220
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 230
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 240
    OOR, OOR, OOR, OOR, OOR, OOR // 250
};

FOLLY_ALIGNED(16) constexpr uint16_t shift10[] = {
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 0-9
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 10
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 20
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 30
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, 0, // 40
    10, 20, 30, 40, 50, 60, 70, 80, 90, OOR, OOR,
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 60
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 70
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 80
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 90
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 100
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 110
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 120
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 130
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 140
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 150
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 160
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 170
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 180
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 190
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 200
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 210
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 220
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 230
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 240
    OOR, OOR, OOR, OOR, OOR, OOR // 250
alignas(16) constexpr uint16_t shift10[] = {
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 0-9
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 10
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 20
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 30
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, 0, 10, // 40
    20, 30, 40, 50, 60, 70, 80, 90, OOR, OOR,
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 60
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 70
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 80
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 90
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 100
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 110
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 120
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 130
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 140
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 150
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 160
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 170
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 180
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 190
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 200
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 210
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 220
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 230
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 240
    OOR, OOR, OOR, OOR, OOR, OOR // 250
};

FOLLY_ALIGNED(16) constexpr uint16_t shift100[] = {
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 0-9
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 10
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 20
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 30
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, 0, // 40
    100, 200, 300, 400, 500, 600, 700, 800, 900, OOR, OOR,
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 60
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 70
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 80
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 90
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 100
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 110
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 120
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 130
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 140
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 150
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 160
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 170
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 180
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 190
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 200
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 210
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 220
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 230
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 240
    OOR, OOR, OOR, OOR, OOR, OOR // 250
alignas(16) constexpr uint16_t shift100[] = {
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 0-9
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 10
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 20
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 30
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, 0, 100, // 40
    200, 300, 400, 500, 600, 700, 800, 900, OOR, OOR,
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 60
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 70
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 80
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 90
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 100
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 110
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 120
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 130
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 140
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 150
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 160
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 170
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 180
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 190
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 200
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 210
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 220
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 230
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 240
    OOR, OOR, OOR, OOR, OOR, OOR // 250
};

FOLLY_ALIGNED(16) constexpr uint16_t shift1000[] = {
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 0-9
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 10
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 20
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 30
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, 0, // 40
    1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, OOR, OOR,
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 60
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 70
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 80
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 90
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 100
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 110
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 120
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 130
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 140
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 150
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 160
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 170
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 180
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 190
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 200
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 210
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 220
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 230
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 240
    OOR, OOR, OOR, OOR, OOR, OOR // 250
alignas(16) constexpr uint16_t shift1000[] = {
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 0-9
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 10
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 20
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 30
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, 0, 1000, // 40
    2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, OOR, OOR,
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 60
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 70
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 80
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 90
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 100
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 110
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 120
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 130
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 140
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 150
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 160
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 170
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 180
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 190
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 200
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 210
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 220
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 230
    OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, // 240
    OOR, OOR, OOR, OOR, OOR, OOR // 250
};
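
The four shift tables are indexed directly by character value: '0' through '9' are ASCII 48-57, so only entries 48-57 hold real values (the digit times 1, 10, 100, or 1000), and every other entry holds the OOR sentinel defined earlier in this file, a value larger than any valid digit sum. A minimal sketch of the lookup idea; parse2 is a hypothetical helper, not part of Folly:

#include <cstdint>
constexpr int32_t kOOR = 10000; // assumed to match the OOR sentinel above
inline int parse2(const char* b, const uint16_t* shift10, const uint16_t* shift1) {
  const int32_t sum = shift10[static_cast<unsigned char>(b[0])] +
      shift1[static_cast<unsigned char>(b[1])];
  return sum >= kOOR ? -1 : sum; // any non-digit entry is kOOR and poisons the sum
}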

struct ErrorString {
@ -229,13 +235,13 @@ constexpr const std::array<
}};

// Check if ASCII is really ASCII
using IsAscii = std::
    integral_constant<bool, 'A' == 65 && 'Z' == 90 && 'a' == 97 && 'z' == 122>;
using IsAscii =
    bool_constant<'A' == 65 && 'Z' == 90 && 'a' == 97 && 'z' == 122>;

// The code in this file that uses tolower() really only cares about
// 7-bit ASCII characters, so we can take a nice shortcut here.
inline char tolower_ascii(char in) {
  return IsAscii::value ? in | 0x20 : std::tolower(in);
  return IsAscii::value ? in | 0x20 : char(std::tolower(in));
}
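
The `in | 0x20` shortcut works because ASCII encodes case in bit 5: 'A' is 0x41 and 'a' is 0x61, so setting that bit maps each uppercase letter to its lowercase form and leaves lowercase letters unchanged. It can also perturb non-letters ('@' becomes '`'), which is harmless here because callers only compare the result against known lowercase strings:

static_assert(('A' | 0x20) == 'a', "bit 5 selects case in ASCII");
static_assert(('z' | 0x20) == 'z', "lowercase input is unchanged");
static_assert(('@' | 0x20) == '`', "non-letters may move; callers tolerate it");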

inline bool bool_str_cmp(const char** b, size_t len, const char* value) {
@ -255,7 +261,7 @@ inline bool bool_str_cmp(const char** b, size_t len, const char* value) {
  return true;
}

} // anonymous namespace
} // namespace

Expected<bool, ConversionCode> str_to_bool(StringPiece* src) noexcept {
  auto b = src->begin(), e = src->end();
@ -269,7 +275,7 @@ Expected<bool, ConversionCode> str_to_bool(StringPiece* src) noexcept {
  }

  bool result;
  size_t len = e - b;
  size_t len = size_t(e - b);
  switch (*b) {
    case '0':
    case '1': {
@ -286,7 +292,7 @@ Expected<bool, ConversionCode> str_to_bool(StringPiece* src) noexcept {
    case 'Y':
      result = true;
      if (!bool_str_cmp(&b, len, "yes")) {
      ++b; // accept the single 'y' character
        ++b; // accept the single 'y' character
      }
      break;
    case 'n':
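
Judging from the cases visible in this hunk, str_to_bool accepts digit strings starting with '0'/'1' as well as the y/yes and n/no forms (the on/off and true/false branches fall outside the excerpt). A hedged usage sketch through the public tryTo entry point:

#include <folly/Conv.h>
void bool_demo() {
  auto a = folly::tryTo<bool>("yes");    // hasValue(), value() == true
  auto b = folly::tryTo<bool>("0");      // hasValue(), value() == false
  auto c = folly::tryTo<bool>("banana"); // hasError(): not an accepted form
}
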
@ -336,22 +342,24 @@ Expected<bool, ConversionCode> str_to_bool(StringPiece* src) noexcept {
template <class Tgt>
Expected<Tgt, ConversionCode> str_to_floating(StringPiece* src) noexcept {
  using namespace double_conversion;
  static StringToDoubleConverter
      conv(StringToDoubleConverter::ALLOW_TRAILING_JUNK
           | StringToDoubleConverter::ALLOW_LEADING_SPACES,
           0.0,
           // return this for junk input string
           std::numeric_limits<double>::quiet_NaN(),
           nullptr, nullptr);
  static StringToDoubleConverter conv(
      StringToDoubleConverter::ALLOW_TRAILING_JUNK |
          StringToDoubleConverter::ALLOW_LEADING_SPACES,
      0.0,
      // return this for junk input string
      std::numeric_limits<double>::quiet_NaN(),
      nullptr,
      nullptr);

  if (src->empty()) {
    return makeUnexpected(ConversionCode::EMPTY_INPUT_STRING);
  }

  int length;
  auto result = conv.StringToDouble(src->data(),
                                    static_cast<int>(src->size()),
                                    &length); // processed char count
  auto result = conv.StringToDouble(
      src->data(),
      static_cast<int>(src->size()),
      &length); // processed char count

  if (!std::isnan(result)) {
    // If we get here with length = 0, the input string is empty.
@ -361,11 +369,28 @@ Expected<Tgt, ConversionCode> str_to_floating(StringPiece* src) noexcept {
    // want to raise an error; length will point past the last character
    // that was processed, so we need to check if that character was
    // whitespace or not.
    if (length == 0 || (result == 0.0 && std::isspace((*src)[length - 1]))) {
    if (length == 0 ||
        (result == 0.0 && std::isspace((*src)[size_t(length) - 1]))) {
      return makeUnexpected(ConversionCode::EMPTY_INPUT_STRING);
    }
    src->advance(length);
    return result;
    if (length >= 2) {
      const char* suffix = src->data() + length - 1;
      // double_conversion doesn't update length correctly when there is an
      // incomplete exponent specifier. Converting "12e-f-g" shouldn't consume
      // any more than "12", but it will consume "12e-".

      // "123-" should only parse "123"
      if (*suffix == '-' || *suffix == '+') {
        --suffix;
        --length;
      }
      // "12e-f-g" or "12euro" should only parse "12"
      if (*suffix == 'e' || *suffix == 'E') {
        --length;
      }
    }
    src->advance(size_t(length));
    return Tgt(result);
  }

  auto* e = src->end();
@ -374,7 +399,7 @@ Expected<Tgt, ConversionCode> str_to_floating(StringPiece* src) noexcept {

  // There must be non-whitespace, otherwise we would have caught this above
  assert(b < e);
  size_t size = e - b;
  size_t size = size_t(e - b);

  bool negative = false;
  if (*b == '-') {
@ -423,7 +448,7 @@ Expected<Tgt, ConversionCode> str_to_floating(StringPiece* src) noexcept {

  src->assign(b, e);

  return result;
  return Tgt(result);
}

template Expected<float, ConversionCode> str_to_floating<float>(
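
The block added in this hunk works around a double_conversion quirk: with ALLOW_TRAILING_JUNK, a dangling exponent marker and sign ("12e-" in "12e-f-g") is counted as processed even though it contributed nothing to the value, so the code backs `length` up over a trailing '+'/'-' and then a trailing 'e'/'E' before advancing the StringPiece. The expected effect, sketched (assuming the detail:: qualification of this function):

// folly::StringPiece sp("12e-f-g");
// auto v = folly::detail::str_to_floating<double>(&sp); // v holds 12.0
// sp now starts at "e-f-g": the incomplete exponent was handed back.
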
@ -463,12 +488,12 @@ class SignedValueHandler<T, true> {
  Expected<T, ConversionCode> finalize(U value) {
    T rv;
    if (negative_) {
      rv = -value;
      rv = T(-value);
      if (UNLIKELY(rv > 0)) {
        return makeUnexpected(ConversionCode::NEGATIVE_OVERFLOW);
      }
    } else {
      rv = value;
      rv = T(value);
      if (UNLIKELY(rv < 0)) {
        return makeUnexpected(ConversionCode::POSITIVE_OVERFLOW);
      }
@ -518,7 +543,7 @@ inline Expected<Tgt, ConversionCode> digits_to(
    return makeUnexpected(err);
  }

  size_t size = e - b;
  size_t size = size_t(e - b);

  /* Although the string is entirely made of digits, we still need to
   * check for overflow.
@ -531,7 +556,7 @@ inline Expected<Tgt, ConversionCode> digits_to(
      return Tgt(0); // just zeros, e.g. "0000"
    }
    if (*b != '0') {
      size = e - b;
      size = size_t(e - b);
      break;
    }
  }
@ -549,7 +574,7 @@ inline Expected<Tgt, ConversionCode> digits_to(
  UT result = 0;

  for (; e - b >= 4; b += 4) {
    result *= 10000;
    result *= static_cast<UT>(10000);
    const int32_t r0 = shift1000[static_cast<size_t>(b[0])];
    const int32_t r1 = shift100[static_cast<size_t>(b[1])];
    const int32_t r2 = shift10[static_cast<size_t>(b[2])];
@ -558,45 +583,45 @@ inline Expected<Tgt, ConversionCode> digits_to(
    if (sum >= OOR) {
      goto outOfRange;
    }
    result += sum;
    result += UT(sum);
  }

  switch (e - b) {
    case 3: {
      const int32_t r0 = shift100[static_cast<size_t>(b[0])];
      const int32_t r1 = shift10[static_cast<size_t>(b[1])];
      const int32_t r2 = shift1[static_cast<size_t>(b[2])];
      const auto sum = r0 + r1 + r2;
      if (sum >= OOR) {
        goto outOfRange;
    case 3: {
      const int32_t r0 = shift100[static_cast<size_t>(b[0])];
      const int32_t r1 = shift10[static_cast<size_t>(b[1])];
      const int32_t r2 = shift1[static_cast<size_t>(b[2])];
      const auto sum = r0 + r1 + r2;
      if (sum >= OOR) {
        goto outOfRange;
      }
      result = UT(1000 * result + sum);
      break;
    }
      result = 1000 * result + sum;
      break;
    }
    case 2: {
      const int32_t r0 = shift10[static_cast<size_t>(b[0])];
      const int32_t r1 = shift1[static_cast<size_t>(b[1])];
      const auto sum = r0 + r1;
      if (sum >= OOR) {
        goto outOfRange;
    case 2: {
      const int32_t r0 = shift10[static_cast<size_t>(b[0])];
      const int32_t r1 = shift1[static_cast<size_t>(b[1])];
      const auto sum = r0 + r1;
      if (sum >= OOR) {
        goto outOfRange;
      }
      result = UT(100 * result + sum);
      break;
    }
      result = 100 * result + sum;
      break;
    }
    case 1: {
      const int32_t sum = shift1[static_cast<size_t>(b[0])];
      if (sum >= OOR) {
        goto outOfRange;
    case 1: {
      const int32_t sum = shift1[static_cast<size_t>(b[0])];
      if (sum >= OOR) {
        goto outOfRange;
      }
      result = UT(10 * result + sum);
      break;
    }
      result = 10 * result + sum;
      break;
    }
    default:
      assert(b == e);
      if (size == 0) {
        return makeUnexpected(ConversionCode::NO_DIGITS);
      }
      break;
    default:
      assert(b == e);
      if (size == 0) {
        return makeUnexpected(ConversionCode::NO_DIGITS);
      }
      break;
  }

  return sgn.finalize(result);
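
Worked example for the loop above: each iteration folds four characters at once, and because the table entries are pre-scaled, one addition per character replaces a multiply, while a single comparison against OOR validates all four. Parsing "1234" (the fourth lookup, shift1[b[3]], sits just outside the visible hunk):

// r0 = shift1000['1'] == 1000
// r1 = shift100 ['2'] ==  200
// r2 = shift10  ['3'] ==   30
// r3 = shift1   ['4'] ==    4
// sum == 1234 < OOR, so result = result * 10000 + 1234;
// a non-digit anywhere yields OOR from its table, pushing sum >= OOR.
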
@ -695,7 +720,7 @@ Expected<Tgt, ConversionCode> str_to_integral(StringPiece* src) noexcept {
  auto res = sgn.finalize(tmp.value());

  if (res.hasValue()) {
    src->advance(m - src->data());
    src->advance(size_t(m - src->data()));
  }

  return res;

403 ios/Pods/Folly/folly/Conv.h generated
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2011-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -35,7 +35,6 @@
#include <typeinfo>
#include <utility>

#include <boost/implicit_cast.hpp>
#include <double-conversion/double-conversion.h> // V8 JavaScript implementation

#include <folly/Demangle.h>
@ -45,6 +44,7 @@
#include <folly/Range.h>
#include <folly/Traits.h>
#include <folly/Unit.h>
#include <folly/lang/Exception.h>
#include <folly/portability/Math.h>

namespace folly {
@ -130,10 +130,10 @@ inline ConversionCode enforceWhitespaceErr(StringPiece sp) {
inline void enforceWhitespace(StringPiece sp) {
  auto err = enforceWhitespaceErr(sp);
  if (err != ConversionCode::SUCCESS) {
    throw makeConversionError(err, sp);
    throw_exception(makeConversionError(err, sp));
  }
}
}
} // namespace detail

/**
 * The identity conversion function.
@ -195,9 +195,9 @@ namespace detail {
// though the runtime performance is the same.

template <typename... Ts>
auto getLastElement(Ts&&... ts) -> decltype(
    std::get<sizeof...(Ts)-1>(std::forward_as_tuple(std::forward<Ts>(ts)...))) {
  return std::get<sizeof...(Ts)-1>(
auto getLastElement(Ts&&... ts) -> decltype(std::get<sizeof...(Ts) - 1>(
    std::forward_as_tuple(std::forward<Ts>(ts)...))) {
  return std::get<sizeof...(Ts) - 1>(
      std::forward_as_tuple(std::forward<Ts>(ts)...));
}

@ -250,13 +250,12 @@ struct LastElement : std::decay<decltype(
namespace detail {

template <typename IntegerType>
constexpr unsigned int
digitsEnough() {
constexpr unsigned int digitsEnough() {
  return (unsigned int)(ceil(sizeof(IntegerType) * CHAR_BIT * M_LN2 / M_LN10));
}

inline size_t
unsafeTelescope128(char * buffer, size_t room, unsigned __int128 x) {
unsafeTelescope128(char* buffer, size_t room, unsigned __int128 x) {
  typedef unsigned __int128 Usrc;
  size_t p = room - 1;

@ -264,26 +263,26 @@ unsafeTelescope128(char * buffer, size_t room, unsigned __int128 x) {
    const auto y = x / 10;
    const auto digit = x % 10;

    buffer[p--] = '0' + digit;
    buffer[p--] = static_cast<char>('0' + digit);
    x = y;
  }

  uint64_t xx = x; // Moving to faster 64-bit division thereafter
  uint64_t xx = static_cast<uint64_t>(x); // Rest uses faster 64-bit division

  while (xx >= 10) {
    const auto y = xx / 10ULL;
    const auto digit = xx % 10ULL;

    buffer[p--] = '0' + digit;
    buffer[p--] = static_cast<char>('0' + digit);
    xx = y;
  }

  buffer[p] = '0' + xx;
  buffer[p] = static_cast<char>('0' + xx);

  return p;
}

}
} // namespace detail
#endif
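
unsafeTelescope128 fills the buffer from the back: `p` starts at `room - 1`, digits are written right to left, and the returned index marks where the number begins, so the caller appends the range from that index to the end. The 128-bit division loop runs only while the value is too wide for uint64_t; the rest uses cheaper 64-bit division, per the comment above. A hedged caller sketch of this internal API:

// char buf[folly::detail::digitsEnough<unsigned __int128>()];
// size_t p = folly::detail::unsafeTelescope128(buf, sizeof(buf), x);
// digits of x now occupy buf[p] .. buf[sizeof(buf) - 1]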

/**
@ -303,7 +302,7 @@ inline uint32_t digits10(uint64_t v) {
  // 10^i, defined for i 0 through 19.
  // This is 20 * 8 == 160 bytes, which fits neatly into 5 cache lines
  // (assuming a cache line size of 64).
  static const uint64_t powersOf10[20] FOLLY_ALIGNED(64) = {
  alignas(64) static const uint64_t powersOf10[20] = {
      1,
      10,
      100,
@ -327,12 +326,12 @@ inline uint32_t digits10(uint64_t v) {
  };

  // "count leading zeroes" operation not valid; for 0; special case this.
  if UNLIKELY (! v) {
  if (UNLIKELY(!v)) {
    return 1;
  }

  // bits is in the ballpark of log_2(v).
  const uint8_t leadingZeroes = __builtin_clzll(v);
  const uint32_t leadingZeroes = __builtin_clzll(v);
  const auto bits = 63 - leadingZeroes;

  // approximate log_10(v) == log_10(2) * bits.
@ -342,16 +341,24 @@ inline uint32_t digits10(uint64_t v) {

  // return that log_10 lower bound, plus adjust if input >= 10^(that bound)
  // in case there's a small error and we misjudged length.
  return minLength + (uint32_t) (UNLIKELY (v >= powersOf10[minLength]));
  return minLength + uint32_t(v >= powersOf10[minLength]);

#else

  uint32_t result = 1;
  for (;;) {
    if (LIKELY(v < 10)) return result;
    if (LIKELY(v < 100)) return result + 1;
    if (LIKELY(v < 1000)) return result + 2;
    if (LIKELY(v < 10000)) return result + 3;
  while (true) {
    if (LIKELY(v < 10)) {
      return result;
    }
    if (LIKELY(v < 100)) {
      return result + 1;
    }
    if (LIKELY(v < 1000)) {
      return result + 2;
    }
    if (LIKELY(v < 10000)) {
      return result + 3;
    }
    // Skip ahead by 4 orders of magnitude
    v /= 10000U;
    result += 4;
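
Both paths compute the same function: the number of decimal digits in v, with digits10(0) defined as 1. The clz path takes a floor-log2, scales it to an approximate log10 (the scaling lines are elided by this hunk), and corrects by at most one using the powersOf10 table; the portable path peels four digits per iteration. Expected values, as a sketch:

#include <cassert>
#include <folly/Conv.h>
void digits10_demo() {
  assert(folly::digits10(0) == 1); // special-cased: clz is undefined for 0
  assert(folly::digits10(9) == 1);
  assert(folly::digits10(10) == 2);
  assert(folly::digits10(18446744073709551615ULL) == 20); // uint64_t max
}
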
@ -373,7 +380,7 @@ inline uint32_t digits10(uint64_t v) {
 * because it does not add a terminating \0.
 */

inline uint32_t uint64ToBufferUnsafe(uint64_t v, char *const buffer) {
inline uint32_t uint64ToBufferUnsafe(uint64_t v, char* const buffer) {
  auto const result = digits10(v);
  // WARNING: using size_t or pointer arithmetic for pos slows down
  // the loop below 20x. This is because several 32-bit ops can be
@ -383,12 +390,12 @@ inline uint32_t uint64ToBufferUnsafe(uint64_t v, char *const buffer) {
    // Keep these together so a peephole optimization "sees" them and
    // computes them in one shot.
    auto const q = v / 10;
    auto const r = static_cast<uint32_t>(v % 10);
    buffer[pos--] = '0' + r;
    auto const r = v % 10;
    buffer[pos--] = static_cast<char>('0' + r);
    v = q;
  }
  // Last digit is trivial to handle
  buffer[pos] = static_cast<uint32_t>(v) + '0';
  buffer[pos] = static_cast<char>(v + '0');
  return result;
}
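
As the doc comment above warns, the function is "unsafe" on two counts: it never bounds-checks the buffer, which must have room for up to 20 digits, and it writes no terminating NUL; the return value is the digit count from digits10. A minimal wrapper sketch:

#include <folly/Conv.h>
#include <string>
std::string u64_to_string(uint64_t v) {
  char buf[20]; // 20 digits suffice for any uint64_t
  return std::string(buf, folly::uint64ToBufferUnsafe(v, buf));
}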

@ -396,26 +403,29 @@ inline uint32_t uint64ToBufferUnsafe(uint64_t v, char *const buffer) {
 * A single char gets appended.
 */
template <class Tgt>
void toAppend(char value, Tgt * result) {
void toAppend(char value, Tgt* result) {
  *result += value;
}

template<class T>
constexpr typename std::enable_if<
  std::is_same<T, char>::value,
  size_t>::type
template <class T>
constexpr typename std::enable_if<std::is_same<T, char>::value, size_t>::type
estimateSpaceNeeded(T) {
  return 1;
}

template <size_t N>
constexpr size_t estimateSpaceNeeded(const char (&)[N]) {
  return N;
}

/**
 * Everything implicitly convertible to const char* gets appended.
 */
template <class Tgt, class Src>
typename std::enable_if<
    std::is_convertible<Src, const char*>::value
    && IsSomeString<Tgt>::value>::type
toAppend(Src value, Tgt * result) {
    std::is_convertible<Src, const char*>::value &&
    IsSomeString<Tgt>::value>::type
toAppend(Src value, Tgt* result) {
  // Treat null pointers like an empty string, as in:
  // operator<<(std::ostream&, const char*).
  const char* c = value;
@ -428,19 +438,25 @@ template <class Src>
typename std::enable_if<std::is_convertible<Src, const char*>::value, size_t>::
    type
estimateSpaceNeeded(Src value) {
  const char *c = value;
  const char* c = value;
  if (c) {
    return folly::StringPiece(value).size();
  };
  return 0;
}

template<class Src>
template <class Src>
typename std::enable_if<IsSomeString<Src>::value, size_t>::type
estimateSpaceNeeded(Src const& value) {
  return value.size();
}

template <class Src>
typename std::enable_if<
    (std::is_convertible<Src, folly::StringPiece>::value ||
     IsSomeString<Src>::value) &&
        !std::is_convertible<Src, const char*>::value,
    size_t>::type
    std::is_convertible<Src, folly::StringPiece>::value &&
        !IsSomeString<Src>::value &&
        !std::is_convertible<Src, const char*>::value,
    size_t>::type
estimateSpaceNeeded(Src value) {
  return folly::StringPiece(value).size();
}
@ -450,11 +466,11 @@ inline size_t estimateSpaceNeeded(std::nullptr_t /* value */) {
  return 0;
}

template<class Src>
template <class Src>
typename std::enable_if<
    std::is_pointer<Src>::value &&
        IsSomeString<std::remove_pointer<Src>>::value,
    size_t>::type
    std::is_pointer<Src>::value &&
        IsSomeString<std::remove_pointer<Src>>::value,
    size_t>::type
estimateSpaceNeeded(Src value) {
  return value->size();
}
@ -464,8 +480,8 @@ estimateSpaceNeeded(Src value) {
 */
template <class Tgt, class Src>
typename std::enable_if<
    IsSomeString<Src>::value && IsSomeString<Tgt>::value>::type
toAppend(const Src& value, Tgt * result) {
    IsSomeString<Src>::value && IsSomeString<Tgt>::value>::type
toAppend(const Src& value, Tgt* result) {
  result->append(value);
}

@ -473,9 +489,9 @@ toAppend(const Src& value, Tgt * result) {
 * and StringPiece objects too
 */
template <class Tgt>
typename std::enable_if<
    IsSomeString<Tgt>::value>::type
toAppend(StringPiece value, Tgt * result) {
typename std::enable_if<IsSomeString<Tgt>::value>::type toAppend(
    StringPiece value,
    Tgt* result) {
  result->append(value.data(), value.size());
}

@ -484,9 +500,9 @@ toAppend(StringPiece value, Tgt * result) {
 * so make a specialization.
 */
template <class Tgt>
typename std::enable_if<
    IsSomeString<Tgt>::value>::type
toAppend(const fbstring& value, Tgt * result) {
typename std::enable_if<IsSomeString<Tgt>::value>::type toAppend(
    const fbstring& value,
    Tgt* result) {
  result->append(value.data(), value.size());
}

@ -496,8 +512,7 @@ toAppend(const fbstring& value, Tgt * result) {
 */

template <class Tgt>
void
toAppend(__int128 value, Tgt * result) {
void toAppend(__int128 value, Tgt* result) {
  typedef unsigned __int128 Usrc;
  char buffer[detail::digitsEnough<unsigned __int128>() + 1];
  size_t p;
@ -513,8 +528,7 @@ toAppend(__int128 value, Tgt * result) {
}

template <class Tgt>
void
toAppend(unsigned __int128 value, Tgt * result) {
void toAppend(unsigned __int128 value, Tgt* result) {
  char buffer[detail::digitsEnough<unsigned __int128>()];
  size_t p;

@ -523,19 +537,17 @@ toAppend(unsigned __int128 value, Tgt * result) {
  result->append(buffer + p, buffer + sizeof(buffer));
}

template<class T>
constexpr typename std::enable_if<
  std::is_same<T, __int128>::value,
  size_t>::type
estimateSpaceNeeded(T) {
template <class T>
constexpr
    typename std::enable_if<std::is_same<T, __int128>::value, size_t>::type
    estimateSpaceNeeded(T) {
  return detail::digitsEnough<__int128>();
}

template<class T>
constexpr typename std::enable_if<
  std::is_same<T, unsigned __int128>::value,
  size_t>::type
estimateSpaceNeeded(T) {
template <class T>
constexpr typename std::
    enable_if<std::is_same<T, unsigned __int128>::value, size_t>::type
    estimateSpaceNeeded(T) {
  return detail::digitsEnough<unsigned __int128>();
}

@ -551,23 +563,25 @@ estimateSpaceNeeded(T) {
 */
template <class Tgt, class Src>
typename std::enable_if<
    std::is_integral<Src>::value && std::is_signed<Src>::value &&
    IsSomeString<Tgt>::value && sizeof(Src) >= 4>::type
toAppend(Src value, Tgt * result) {
    std::is_integral<Src>::value && std::is_signed<Src>::value &&
    IsSomeString<Tgt>::value && sizeof(Src) >= 4>::type
toAppend(Src value, Tgt* result) {
  char buffer[20];
  if (value < 0) {
    result->push_back('-');
    result->append(buffer, uint64ToBufferUnsafe(-uint64_t(value), buffer));
    result->append(
        buffer,
        uint64ToBufferUnsafe(~static_cast<uint64_t>(value) + 1, buffer));
  } else {
    result->append(buffer, uint64ToBufferUnsafe(value, buffer));
    result->append(buffer, uint64ToBufferUnsafe(uint64_t(value), buffer));
  }
}

template <class Src>
typename std::enable_if<
    std::is_integral<Src>::value && std::is_signed<Src>::value
    && sizeof(Src) >= 4 && sizeof(Src) < 16,
    size_t>::type
    std::is_integral<Src>::value && std::is_signed<Src>::value &&
        sizeof(Src) >= 4 && sizeof(Src) < 16,
    size_t>::type
estimateSpaceNeeded(Src value) {
  if (value < 0) {
    // When "value" is the smallest negative, negating it would evoke
@ -584,18 +598,18 @@ estimateSpaceNeeded(Src value) {
 */
template <class Tgt, class Src>
typename std::enable_if<
    std::is_integral<Src>::value && !std::is_signed<Src>::value
    && IsSomeString<Tgt>::value && sizeof(Src) >= 4>::type
toAppend(Src value, Tgt * result) {
    std::is_integral<Src>::value && !std::is_signed<Src>::value &&
    IsSomeString<Tgt>::value && sizeof(Src) >= 4>::type
toAppend(Src value, Tgt* result) {
  char buffer[20];
  result->append(buffer, uint64ToBufferUnsafe(value, buffer));
}

template <class Src>
typename std::enable_if<
    std::is_integral<Src>::value && !std::is_signed<Src>::value
    && sizeof(Src) >= 4 && sizeof(Src) < 16,
    size_t>::type
    std::is_integral<Src>::value && !std::is_signed<Src>::value &&
        sizeof(Src) >= 4 && sizeof(Src) < 16,
    size_t>::type
estimateSpaceNeeded(Src value) {
  return digits10(value);
}
@ -606,25 +620,24 @@ estimateSpaceNeeded(Src value) {
 */
template <class Tgt, class Src>
typename std::enable_if<
    std::is_integral<Src>::value
    && IsSomeString<Tgt>::value && sizeof(Src) < 4>::type
toAppend(Src value, Tgt * result) {
  typedef typename
    std::conditional<std::is_signed<Src>::value, int64_t, uint64_t>::type
    Intermediate;
    std::is_integral<Src>::value && IsSomeString<Tgt>::value &&
    sizeof(Src) < 4>::type
toAppend(Src value, Tgt* result) {
  typedef
      typename std::conditional<std::is_signed<Src>::value, int64_t, uint64_t>::
          type Intermediate;
  toAppend<Tgt>(static_cast<Intermediate>(value), result);
}

template <class Src>
typename std::enable_if<
    std::is_integral<Src>::value
    && sizeof(Src) < 4
    && !std::is_same<Src, char>::value,
    size_t>::type
    std::is_integral<Src>::value && sizeof(Src) < 4 &&
        !std::is_same<Src, char>::value,
    size_t>::type
estimateSpaceNeeded(Src value) {
  typedef typename
    std::conditional<std::is_signed<Src>::value, int64_t, uint64_t>::type
    Intermediate;
  typedef
      typename std::conditional<std::is_signed<Src>::value, int64_t, uint64_t>::
          type Intermediate;
  return estimateSpaceNeeded(static_cast<Intermediate>(value));
}

@ -633,15 +646,14 @@ estimateSpaceNeeded(Src value) {
 */
template <class Tgt, class Src>
typename std::enable_if<
    std::is_enum<Src>::value && IsSomeString<Tgt>::value>::type
toAppend(Src value, Tgt * result) {
    std::is_enum<Src>::value && IsSomeString<Tgt>::value>::type
toAppend(Src value, Tgt* result) {
  toAppend(
      static_cast<typename std::underlying_type<Src>::type>(value), result);
}

template <class Src>
typename std::enable_if<
    std::is_enum<Src>::value, size_t>::type
typename std::enable_if<std::is_enum<Src>::value, size_t>::type
estimateSpaceNeeded(Src value) {
  return estimateSpaceNeeded(
      static_cast<typename std::underlying_type<Src>::type>(value));
@ -654,41 +666,45 @@ estimateSpaceNeeded(Src value) {
namespace detail {
constexpr int kConvMaxDecimalInShortestLow = -6;
constexpr int kConvMaxDecimalInShortestHigh = 21;
} // folly::detail
} // namespace detail

/** Wrapper around DoubleToStringConverter **/
template <class Tgt, class Src>
typename std::enable_if<
    std::is_floating_point<Src>::value
    && IsSomeString<Tgt>::value>::type
    std::is_floating_point<Src>::value && IsSomeString<Tgt>::value>::type
toAppend(
    Src value,
    Tgt * result,
    double_conversion::DoubleToStringConverter::DtoaMode mode,
    unsigned int numDigits) {
    Src value,
    Tgt* result,
    double_conversion::DoubleToStringConverter::DtoaMode mode,
    unsigned int numDigits) {
  using namespace double_conversion;
  DoubleToStringConverter
      conv(DoubleToStringConverter::NO_FLAGS,
           "Infinity", "NaN", 'E',
           detail::kConvMaxDecimalInShortestLow,
           detail::kConvMaxDecimalInShortestHigh,
           6, // max leading padding zeros
           1); // max trailing padding zeros
  DoubleToStringConverter conv(
      DoubleToStringConverter::NO_FLAGS,
      "Infinity",
      "NaN",
      'E',
      detail::kConvMaxDecimalInShortestLow,
      detail::kConvMaxDecimalInShortestHigh,
      6, // max leading padding zeros
      1); // max trailing padding zeros
  char buffer[256];
  StringBuilder builder(buffer, sizeof(buffer));
  switch (mode) {
    case DoubleToStringConverter::SHORTEST:
      conv.ToShortest(value, &builder);
      break;
    case DoubleToStringConverter::SHORTEST_SINGLE:
      conv.ToShortestSingle(static_cast<float>(value), &builder);
      break;
    case DoubleToStringConverter::FIXED:
      conv.ToFixed(value, numDigits, &builder);
      conv.ToFixed(value, int(numDigits), &builder);
      break;
    default:
      CHECK(mode == DoubleToStringConverter::PRECISION);
      conv.ToPrecision(value, numDigits, &builder);
      conv.ToPrecision(value, int(numDigits), &builder);
      break;
  }
  const size_t length = builder.position();
  const size_t length = size_t(builder.position());
  builder.Finalize();
  result->append(buffer, length);
}
@ -698,11 +714,10 @@ toAppend(
 */
template <class Tgt, class Src>
typename std::enable_if<
    std::is_floating_point<Src>::value
    && IsSomeString<Tgt>::value>::type
toAppend(Src value, Tgt * result) {
    std::is_floating_point<Src>::value && IsSomeString<Tgt>::value>::type
toAppend(Src value, Tgt* result) {
  toAppend(
    value, result, double_conversion::DoubleToStringConverter::SHORTEST, 0);
      value, result, double_conversion::DoubleToStringConverter::SHORTEST, 0);
}

/**
@ -711,13 +726,12 @@ toAppend(Src value, Tgt * result) {
 * as used in toAppend(double, string*).
 */
template <class Src>
typename std::enable_if<
    std::is_floating_point<Src>::value, size_t>::type
typename std::enable_if<std::is_floating_point<Src>::value, size_t>::type
estimateSpaceNeeded(Src value) {
  // kBase10MaximalLength is 17. We add 1 for decimal point,
  // e.g. 10.0/9 is 17 digits and 18 characters, including the decimal point.
  constexpr int kMaxMantissaSpace =
    double_conversion::DoubleToStringConverter::kBase10MaximalLength + 1;
      double_conversion::DoubleToStringConverter::kBase10MaximalLength + 1;
  // strlen("E-") + digits10(numeric_limits<double>::max_exponent10)
  constexpr int kMaxExponentSpace = 2 + 3;
  static const int kMaxPositiveSpace = std::max({
@ -728,9 +742,11 @@ estimateSpaceNeeded(Src value) {
      // If kConvMaxDecimalInShortestHigh is 21, then 1e21 is the smallest
      // number > 1 which ToShortest outputs in exponential notation,
      // so 21 is the longest non-exponential number > 1.
      detail::kConvMaxDecimalInShortestHigh
  });
  return kMaxPositiveSpace + (value < 0); // +1 for minus sign, if negative
      detail::kConvMaxDecimalInShortestHigh,
  });
  return size_t(
      kMaxPositiveSpace +
      (value < 0 ? 1 : 0)); // +1 for minus sign, if negative
}

/**
@ -738,23 +754,22 @@ estimateSpaceNeeded(Src value) {
 * for estimateSpaceNeed for your type, so that we allocate
 * as much as you need instead of the default
 */
template<class Src>
template <class Src>
struct HasLengthEstimator : std::false_type {};

template <class Src>
constexpr typename std::enable_if<
    !std::is_fundamental<Src>::value
    !std::is_fundamental<Src>::value &&
#if FOLLY_HAVE_INT128_T
    // On OSX 10.10, is_fundamental<__int128> is false :-O
    && !std::is_same<__int128, Src>::value
    && !std::is_same<unsigned __int128, Src>::value
        // On OSX 10.10, is_fundamental<__int128> is false :-O
        !std::is_same<__int128, Src>::value &&
        !std::is_same<unsigned __int128, Src>::value &&
#endif
    && !IsSomeString<Src>::value
    && !std::is_convertible<Src, const char*>::value
    && !std::is_convertible<Src, StringPiece>::value
    && !std::is_enum<Src>::value
    && !HasLengthEstimator<Src>::value,
    size_t>::type
        !IsSomeString<Src>::value &&
        !std::is_convertible<Src, const char*>::value &&
        !std::is_convertible<Src, StringPiece>::value &&
        !std::is_enum<Src>::value && !HasLengthEstimator<Src>::value,
    size_t>::type
estimateSpaceNeeded(const Src&) {
  return sizeof(Src) + 1; // dumbest best effort ever?
}
@ -772,13 +787,13 @@ size_t estimateSpaceToReserve(size_t sofar, const T& v, const Ts&... vs) {
  return estimateSpaceToReserve(sofar + estimateSpaceNeeded(v), vs...);
}

template<class...Ts>
void reserveInTarget(const Ts&...vs) {
template <class... Ts>
void reserveInTarget(const Ts&... vs) {
  getLastElement(vs...)->reserve(estimateSpaceToReserve(0, vs...));
}

template<class Delimiter, class...Ts>
void reserveInTargetDelim(const Delimiter& d, const Ts&...vs) {
template <class Delimiter, class... Ts>
void reserveInTargetDelim(const Delimiter& d, const Ts&... vs) {
  static_assert(sizeof...(vs) >= 2, "Needs at least 2 args");
  size_t fordelim = (sizeof...(vs) - 2) *
      estimateSpaceToReserve(0, d, static_cast<std::string*>(nullptr));
@ -790,8 +805,7 @@ void reserveInTargetDelim(const Delimiter& d, const Ts&...vs) {
 */
template <class T, class Tgt>
typename std::enable_if<
    IsSomeString<typename std::remove_pointer<Tgt>::type>
    ::value>::type
    IsSomeString<typename std::remove_pointer<Tgt>::type>::value>::type
toAppendStrImpl(const T& v, Tgt result) {
  toAppend(v, result);
}
@ -826,12 +840,11 @@ toAppendDelimStrImpl(const Delimiter& delim, const T& v, const Ts&... vs) {
  toAppend(delim, detail::getLastElement(vs...));
  toAppendDelimStrImpl(delim, vs...);
}
} // folly::detail

} // namespace detail

/**
 * Variadic conversion to string. Appends each element in turn.
 * If we have two or more things to append, we it will not reserve
 * If we have two or more things to append, we will not reserve
 * the space for them and will depend on strings exponential growth.
 * If you just append once consider using toAppendFit which reserves
 * the space needed (but does not have exponential as a result).
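
A usage sketch of the variadic form described above; per getLastElement earlier in this file, the target string pointer goes last:

#include <folly/Conv.h>
#include <string>
void toAppend_demo() {
  std::string out;
  folly::toAppend("x = ", 42, ", ratio = ", 1.5, &out);
  // out == "x = 42, ratio = 1.5"
  // toAppendFit(...) has the same shape but reserves the estimated space first.
}
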
@ -902,14 +915,15 @@ typename std::enable_if<IsSomeString<Tgt>::value>::type toAppend(
 */
template <class Delimiter, class Tgt>
typename std::enable_if<IsSomeString<Tgt>::value>::type toAppendDelim(
    const Delimiter& /* delim */, Tgt* /* result */) {}
    const Delimiter& /* delim */,
    Tgt* /* result */) {}

/**
 * 1 element: same as toAppend.
 */
template <class Delimiter, class T, class Tgt>
typename std::enable_if<IsSomeString<Tgt>::value>::type toAppendDelim(
    const Delimiter& /* delim */, const T& v, Tgt* tgt) {
typename std::enable_if<IsSomeString<Tgt>::value>::type
toAppendDelim(const Delimiter& /* delim */, const T& v, Tgt* tgt) {
  toAppend(v, tgt);
}

@ -982,11 +996,12 @@ to(Src value) {
 * toDelim<SomeString>(SomeString str) returns itself.
 */
template <class Tgt, class Delim, class Src>
typename std::enable_if<IsSomeString<Tgt>::value &&
                            std::is_same<Tgt, Src>::value,
                        Tgt>::type
toDelim(const Delim& /* delim */, const Src& value) {
  return value;
typename std::enable_if<
    IsSomeString<Tgt>::value &&
        std::is_same<Tgt, typename std::decay<Src>::type>::value,
    Tgt>::type
toDelim(const Delim& /* delim */, Src&& value) {
  return std::forward<Src>(value);
}

/**
@ -1161,7 +1176,7 @@ to(const char* b, const char* e) {
 * Parsing strings to numeric types.
 */
template <typename Tgt>
FOLLY_WARN_UNUSED_RESULT inline typename std::enable_if<
FOLLY_NODISCARD inline typename std::enable_if<
    std::is_arithmetic<Tgt>::value,
    Expected<StringPiece, ConversionCode>>::type
parseTo(StringPiece src, Tgt& out) {
@ -1175,6 +1190,20 @@ parseTo(StringPiece src, Tgt& out) {

namespace detail {

/**
 * Bool to integral/float doesn't need any special checks, and this
 * overload means we aren't trying to see if a bool is less than
 * an integer.
 */
template <class Tgt>
typename std::enable_if<
    !std::is_same<Tgt, bool>::value &&
        (std::is_integral<Tgt>::value || std::is_floating_point<Tgt>::value),
    Expected<Tgt, ConversionCode>>::type
convertTo(const bool& value) noexcept {
  return static_cast<Tgt>(value ? 1 : 0);
}

/**
 * Checked conversion from integral to integral. The checks are only
 * performed when meaningful, e.g. conversion from int to long goes
@ -1183,17 +1212,17 @@ namespace detail {
template <class Tgt, class Src>
typename std::enable_if<
    std::is_integral<Src>::value && !std::is_same<Tgt, Src>::value &&
    !std::is_same<Tgt, bool>::value &&
    std::is_integral<Tgt>::value,
        !std::is_same<Tgt, bool>::value && std::is_integral<Tgt>::value,
    Expected<Tgt, ConversionCode>>::type
convertTo(const Src& value) noexcept {
  /* static */ if (
      std::numeric_limits<Tgt>::max() < std::numeric_limits<Src>::max()) {
  if /* constexpr */ (
      folly::_t<std::make_unsigned<Tgt>>(std::numeric_limits<Tgt>::max()) <
      folly::_t<std::make_unsigned<Src>>(std::numeric_limits<Src>::max())) {
    if (greater_than<Tgt, std::numeric_limits<Tgt>::max()>(value)) {
      return makeUnexpected(ConversionCode::ARITH_POSITIVE_OVERFLOW);
    }
  }
  /* static */ if (
  if /* constexpr */ (
      std::is_signed<Src>::value &&
      (!std::is_signed<Tgt>::value || sizeof(Src) > sizeof(Tgt))) {
    if (less_than<Tgt, std::numeric_limits<Tgt>::min()>(value)) {
@ -1214,7 +1243,7 @@ typename std::enable_if<
    !std::is_same<Tgt, Src>::value,
    Expected<Tgt, ConversionCode>>::type
convertTo(const Src& value) noexcept {
  /* static */ if (
  if /* constexpr */ (
      std::numeric_limits<Tgt>::max() < std::numeric_limits<Src>::max()) {
    if (value > std::numeric_limits<Tgt>::max()) {
      return makeUnexpected(ConversionCode::ARITH_POSITIVE_OVERFLOW);
@ -1223,7 +1252,7 @@ convertTo(const Src& value) noexcept {
      return makeUnexpected(ConversionCode::ARITH_NEGATIVE_OVERFLOW);
    }
  }
  return boost::implicit_cast<Tgt>(value);
  return static_cast<Tgt>(value);
}
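
The effect of these checked conversions, seen through the public API; ARITH_POSITIVE_OVERFLOW is the code produced by the max-bound test above:

#include <folly/Conv.h>
void narrowing_demo() {
  auto ok = folly::tryTo<int8_t>(100);   // hasValue(): fits in int8_t
  auto bad = folly::tryTo<int8_t>(1000); // hasError(): ARITH_POSITIVE_OVERFLOW
  // folly::to<int8_t>(1000) would throw ConversionError instead.
}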

/**
@ -1314,11 +1343,9 @@ inline std::string errorValue(const Src& value) {
}

template <typename Tgt, typename Src>
using IsArithToArith = std::integral_constant<
    bool,
using IsArithToArith = bool_constant<
    !std::is_same<Tgt, Src>::value && !std::is_same<Tgt, bool>::value &&
    std::is_arithmetic<Src>::value &&
    std::is_arithmetic<Tgt>::value>;
    std::is_arithmetic<Src>::value && std::is_arithmetic<Tgt>::value>;

} // namespace detail

@ -1353,7 +1380,7 @@ typename std::enable_if<detail::IsArithToArith<Tgt, Src>::value, Tgt>::type to(
 * }
 ******************************************************************************/
template <class T>
FOLLY_WARN_UNUSED_RESULT typename std::enable_if<
FOLLY_NODISCARD typename std::enable_if<
    std::is_enum<T>::value,
    Expected<StringPiece, ConversionCode>>::type
parseTo(StringPiece in, T& out) noexcept {
@ -1363,7 +1390,7 @@ parseTo(StringPiece in, T& out) noexcept {
  return restOrError;
}

FOLLY_WARN_UNUSED_RESULT
FOLLY_NODISCARD
inline Expected<StringPiece, ConversionCode> parseTo(
    StringPiece in,
    StringPiece& out) noexcept {
@ -1371,7 +1398,7 @@ inline Expected<StringPiece, ConversionCode> parseTo(
  return StringPiece{in.end(), in.end()};
}

FOLLY_WARN_UNUSED_RESULT
FOLLY_NODISCARD
inline Expected<StringPiece, ConversionCode> parseTo(
    StringPiece in,
    std::string& out) {
@ -1380,7 +1407,7 @@ inline Expected<StringPiece, ConversionCode> parseTo(
  return StringPiece{in.end(), in.end()};
}

FOLLY_WARN_UNUSED_RESULT
FOLLY_NODISCARD
inline Expected<StringPiece, ConversionCode> parseTo(
    StringPiece in,
    fbstring& out) {
@ -1396,8 +1423,9 @@ using ParseToResult = decltype(parseTo(StringPiece{}, std::declval<Tgt&>()));
struct CheckTrailingSpace {
  Expected<Unit, ConversionCode> operator()(StringPiece sp) const {
    auto e = enforceWhitespaceErr(sp);
    if (UNLIKELY(e != ConversionCode::SUCCESS))
    if (UNLIKELY(e != ConversionCode::SUCCESS)) {
      return makeUnexpected(e);
    }
    return unit;
  }
};
@ -1456,6 +1484,14 @@ tryTo(StringPiece src) {
  });
}

template <class Tgt, class Src>
inline typename std::enable_if<
    IsSomeString<Src>::value && !std::is_same<StringPiece, Tgt>::value,
    Tgt>::type
to(Src const& src) {
  return to<Tgt>(StringPiece(src.data(), src.size()));
}

template <class Tgt>
inline
    typename std::enable_if<!std::is_same<StringPiece, Tgt>::value, Tgt>::type
@ -1468,10 +1504,14 @@ inline
      detail::ReturnUnit<Error>>::type;
  auto tmp = detail::parseToWrap(src, result);
  return tmp
      .thenOrThrow(Check(), [&](Error e) { throw makeConversionError(e, src); })
      .thenOrThrow(
          Check(),
          [&](Error e) { throw_exception(makeConversionError(e, src)); })
      .thenOrThrow(
          [&](Unit) { return std::move(result); },
          [&](Error e) { throw makeConversionError(e, tmp.value()); });
          [&](Error e) {
            throw_exception(makeConversionError(e, tmp.value()));
          });
}

/**
@ -1507,7 +1547,8 @@ Tgt to(StringPiece* src) {

template <class Tgt, class Src>
typename std::enable_if<
    std::is_enum<Src>::value && !std::is_same<Src, Tgt>::value,
    std::is_enum<Src>::value && !std::is_same<Src, Tgt>::value &&
        !std::is_convertible<Tgt, StringPiece>::value,
    Expected<Tgt, ConversionCode>>::type
tryTo(const Src& value) {
  using I = typename std::underlying_type<Src>::type;
@ -1516,8 +1557,9 @@ tryTo(const Src& value) {

template <class Tgt, class Src>
typename std::enable_if<
    std::is_enum<Tgt>::value && !std::is_same<Src, Tgt>::value,
    Tgt>::type
    !std::is_convertible<Src, StringPiece>::value && std::is_enum<Tgt>::value &&
        !std::is_same<Src, Tgt>::value,
    Expected<Tgt, ConversionCode>>::type
tryTo(const Src& value) {
  using I = typename std::underlying_type<Tgt>::type;
  return tryTo<I>(value).then([](I i) { return static_cast<Tgt>(i); });
@ -1525,7 +1567,8 @@ tryTo(const Src& value) {

template <class Tgt, class Src>
typename std::enable_if<
    std::is_enum<Src>::value && !std::is_same<Src, Tgt>::value,
    std::is_enum<Src>::value && !std::is_same<Src, Tgt>::value &&
        !std::is_convertible<Tgt, StringPiece>::value,
    Tgt>::type
to(const Src& value) {
  return to<Tgt>(static_cast<typename std::underlying_type<Src>::type>(value));
@ -1533,8 +1576,10 @@ to(const Src& value) {

template <class Tgt, class Src>
typename std::enable_if<
    std::is_enum<Tgt>::value && !std::is_same<Src, Tgt>::value, Tgt>::type
to(const Src & value) {
    !std::is_convertible<Src, StringPiece>::value && std::is_enum<Tgt>::value &&
        !std::is_same<Src, Tgt>::value,
    Tgt>::type
to(const Src& value) {
  return static_cast<Tgt>(to<typename std::underlying_type<Tgt>::type>(value));
}

48 ios/Pods/Folly/folly/CppAttributes.h generated
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2015-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -22,6 +22,12 @@

#pragma once

#ifndef __has_attribute
#define FOLLY_HAS_ATTRIBUTE(x) 0
#else
#define FOLLY_HAS_ATTRIBUTE(x) __has_attribute(x)
#endif

#ifndef __has_cpp_attribute
#define FOLLY_HAS_CPP_ATTRIBUTE(x) 0
#else
@ -47,12 +53,36 @@
 *   FOLLY_FALLTHROUGH; // no warning: annotated fall-through
 * }
 */
#if FOLLY_HAS_CPP_ATTRIBUTE(clang::fallthrough)
#if FOLLY_HAS_CPP_ATTRIBUTE(fallthrough)
#define FOLLY_FALLTHROUGH [[fallthrough]]
#elif FOLLY_HAS_CPP_ATTRIBUTE(clang::fallthrough)
#define FOLLY_FALLTHROUGH [[clang::fallthrough]]
#elif FOLLY_HAS_CPP_ATTRIBUTE(gnu::fallthrough)
#define FOLLY_FALLTHROUGH [[gnu::fallthrough]]
#else
#define FOLLY_FALLTHROUGH
#endif
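
Usage follows the doc comment above; the macro expands to whichever fallthrough attribute the compiler supports, or to nothing:

void fallthrough_demo(int n, int& acc) {
  switch (n) {
    case 2:
      acc += 2;
      FOLLY_FALLTHROUGH; // deliberate: case 2 also does case 1's work
    case 1:
      acc += 1;
      break;
    default:
      break;
  }
}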

/**
 * Maybe_unused indicates that a function, variable or parameter might or
 * might not be used, e.g.
 *
 * int foo(FOLLY_MAYBE_UNUSED int x) {
 *   #ifdef USE_X
 *   return x;
 *   #else
 *   return 0;
 *   #endif
 * }
 */
#if FOLLY_HAS_CPP_ATTRIBUTE(maybe_unused)
#define FOLLY_MAYBE_UNUSED [[maybe_unused]]
#elif FOLLY_HAS_ATTRIBUTE(__unused__) || __GNUC__
#define FOLLY_MAYBE_UNUSED __attribute__((__unused__))
#else
#define FOLLY_MAYBE_UNUSED
#endif

/**
 * Nullable indicates that a return value or a parameter may be a `nullptr`,
 * e.g.
@ -72,6 +102,20 @@
 */
#if FOLLY_HAS_EXTENSION(nullability)
#define FOLLY_NULLABLE _Nullable
#define FOLLY_NONNULL _Nonnull
#else
#define FOLLY_NULLABLE
#define FOLLY_NONNULL
#endif

/**
 * "Cold" indicates to the compiler that a function is only expected to be
 * called from unlikely code paths. It can affect decisions made by the
 * optimizer both when processing the function body and when analyzing
 * call-sites.
 */
#if __GNUC__
#define FOLLY_COLD __attribute__((__cold__))
#else
#define FOLLY_COLD
#endif

40 ios/Pods/Folly/folly/CpuId.h generated
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2012-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -17,6 +17,7 @@
#pragma once

#include <cstdint>

#include <folly/Portability.h>

#ifdef _MSC_VER
@ -28,35 +29,36 @@ namespace folly {
/**
 * Identification of an Intel CPU.
 * Supports CPUID feature flags (EAX=1) and extended features (EAX=7, ECX=0).
 * Values from http://www.intel.com/content/www/us/en/processors/processor-identification-cpuid-instruction-note.html
 * Values from
 * http://www.intel.com/content/www/us/en/processors/processor-identification-cpuid-instruction-note.html
 */
class CpuId {
 public:
  // Always inline in order for this to be usable from a __ifunc__.
  // In shared library mde, a __ifunc__ runs at relocation time, while the
  // In shared library mode, a __ifunc__ runs at relocation time, while the
  // PLT hasn't been fully populated yet; thus, ifuncs cannot use symbols
  // with potentially external linkage. (This issue is less likely in opt
  // mode since inlining happens more likely, and it doesn't happen for
  // statically linked binaries which don't depend on the PLT)
  FOLLY_ALWAYS_INLINE CpuId() {
#ifdef _MSC_VER
#if defined(_MSC_VER) && (FOLLY_X64 || defined(_M_IX86))
    int reg[4];
    __cpuid(static_cast<int*>(reg), 0);
    const int n = reg[0];
    if (n >= 1) {
      __cpuid(static_cast<int*>(reg), 1);
      f1c_ = reg[2];
      f1d_ = reg[3];
      f1c_ = uint32_t(reg[2]);
      f1d_ = uint32_t(reg[3]);
    }
    if (n >= 7) {
      __cpuidex(static_cast<int*>(reg), 7, 0);
      f7b_ = reg[1];
      f7c_ = reg[2];
      f7b_ = uint32_t(reg[1]);
      f7c_ = uint32_t(reg[2]);
    }
#elif defined(__i386__) && defined(__PIC__) && !defined(__clang__) && \
    defined(__GNUC__)
    // The following block like the normal cpuid branch below, but gcc
    // reserves ebx for use of it's pic register so we must specially
    // reserves ebx for use of its pic register so we must specially
    // handle the save and restore to avoid clobbering the register
    uint32_t n;
    __asm__(
@ -65,13 +67,14 @@ class CpuId {
        "popl %%ebx\n\t"
        : "=a"(n)
        : "a"(0)
        : "edx", "ecx");
        : "ecx", "edx");
    if (n >= 1) {
      uint32_t f1a;
      __asm__(
          "pushl %%ebx\n\t"
          "cpuid\n\t"
          "popl %%ebx\n\t"
          : "=c"(f1c_), "=d"(f1d_)
          : "=a"(f1a), "=c"(f1c_), "=d"(f1d_)
          : "a"(1)
          :);
    }
@ -87,19 +90,24 @@ class CpuId {
    }
#elif FOLLY_X64 || defined(__i386__)
    uint32_t n;
    __asm__("cpuid" : "=a"(n) : "a"(0) : "ebx", "edx", "ecx");
    __asm__("cpuid" : "=a"(n) : "a"(0) : "ebx", "ecx", "edx");
    if (n >= 1) {
      __asm__("cpuid" : "=c"(f1c_), "=d"(f1d_) : "a"(1) : "ebx");
      uint32_t f1a;
      __asm__("cpuid" : "=a"(f1a), "=c"(f1c_), "=d"(f1d_) : "a"(1) : "ebx");
    }
    if (n >= 7) {
      __asm__("cpuid" : "=b"(f7b_), "=c"(f7c_) : "a"(7), "c"(0) : "edx");
      uint32_t f7a;
      __asm__("cpuid"
              : "=a"(f7a), "=b"(f7b_), "=c"(f7c_)
              : "a"(7), "c"(0)
              : "edx");
    }
#endif
  }

#define X(name, r, bit) \
  FOLLY_ALWAYS_INLINE bool name() const { \
    return (r) & (1U << bit); \
    return ((r) & (1U << bit)) != 0; \
  }

  // cpuid(1): Processor Info and Feature Bits.
@ -207,4 +215,4 @@ class CpuId {
  uint32_t f7c_ = 0;
};

} // namespace folly
} // namespace folly
|
||||
|
150
ios/Pods/Folly/folly/DefaultKeepAliveExecutor.h
generated
Normal file
@ -0,0 +1,150 @@
/*
 * Copyright 2018-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <future>

#include <glog/logging.h>

#include <folly/Executor.h>
#include <folly/synchronization/Baton.h>

namespace folly {

/// An Executor accepts units of work with add(), which should be
/// threadsafe.
class DefaultKeepAliveExecutor : public virtual Executor {
 public:
  DefaultKeepAliveExecutor() : Executor() {}

  virtual ~DefaultKeepAliveExecutor() {
    DCHECK(!keepAlive_);
  }

  folly::Executor::KeepAlive<> weakRef() {
    return WeakRef::create(controlBlock_, this);
  }

 protected:
  void joinKeepAlive() {
    DCHECK(keepAlive_);
    keepAlive_.reset();
    keepAliveReleaseBaton_.wait();
  }

 private:
  struct ControlBlock {
    std::atomic<ssize_t> keepAliveCount_{1};
  };

  class WeakRef : public Executor {
   public:
    static folly::Executor::KeepAlive<> create(
        std::shared_ptr<ControlBlock> controlBlock,
        Executor* executor) {
      return makeKeepAlive(new WeakRef(std::move(controlBlock), executor));
    }

    void add(Func f) override {
      if (auto executor = lock()) {
        executor->add(std::move(f));
      }
    }

    void addWithPriority(Func f, int8_t priority) override {
      if (auto executor = lock()) {
        executor->addWithPriority(std::move(f), priority);
      }
    }

    virtual uint8_t getNumPriorities() const override {
      return numPriorities_;
    }

   private:
    WeakRef(std::shared_ptr<ControlBlock> controlBlock, Executor* executor)
        : controlBlock_(std::move(controlBlock)),
          executor_(executor),
          numPriorities_(executor->getNumPriorities()) {}

    bool keepAliveAcquire() override {
      auto keepAliveCount =
          keepAliveCount_.fetch_add(1, std::memory_order_relaxed);
      // We should never increment from 0
      DCHECK(keepAliveCount > 0);
      return true;
    }

    void keepAliveRelease() override {
      auto keepAliveCount =
          keepAliveCount_.fetch_sub(1, std::memory_order_acq_rel);
      DCHECK(keepAliveCount >= 1);

      if (keepAliveCount == 1) {
        delete this;
      }
    }

    folly::Executor::KeepAlive<> lock() {
      auto controlBlock =
          controlBlock_->keepAliveCount_.load(std::memory_order_relaxed);
      do {
        if (controlBlock == 0) {
          return {};
        }
      } while (!controlBlock_->keepAliveCount_.compare_exchange_weak(
          controlBlock,
          controlBlock + 1,
          std::memory_order_release,
          std::memory_order_relaxed));

      return makeKeepAlive(executor_);
    }

    std::atomic<size_t> keepAliveCount_{1};

    std::shared_ptr<ControlBlock> controlBlock_;
    Executor* executor_;

    uint8_t numPriorities_;
  };

  bool keepAliveAcquire() override {
    auto keepAliveCount =
        controlBlock_->keepAliveCount_.fetch_add(1, std::memory_order_relaxed);
    // We should never increment from 0
    DCHECK(keepAliveCount > 0);
    return true;
  }

  void keepAliveRelease() override {
    auto keepAliveCount =
        controlBlock_->keepAliveCount_.fetch_sub(1, std::memory_order_acquire);
    DCHECK(keepAliveCount >= 1);

    if (keepAliveCount == 1) {
      keepAliveReleaseBaton_.post(); // std::memory_order_release
    }
  }

  std::shared_ptr<ControlBlock> controlBlock_{std::make_shared<ControlBlock>()};
  Baton<> keepAliveReleaseBaton_;
  KeepAlive<DefaultKeepAliveExecutor> keepAlive_{
      makeKeepAlive<DefaultKeepAliveExecutor>(this)};
};

} // namespace folly
57
ios/Pods/Folly/folly/Demangle.cpp
generated
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2014-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -17,48 +17,20 @@
#include <folly/Demangle.h>

#include <algorithm>
#include <string.h>
#include <cstring>

#include <folly/Malloc.h>
#include <folly/detail/Demangle.h>
#include <folly/portability/Config.h>

#if FOLLY_HAVE_CPLUS_DEMANGLE_V3_CALLBACK
# include <cxxabi.h>
#if FOLLY_DETAIL_HAVE_DEMANGLE_H

// From libiberty
//
// TODO(tudorb): Detect this with autoconf for the open-source version.
//
// __attribute__((__weak__)) doesn't work, because cplus_demangle_v3_callback
// is exported by an object file in libiberty.a, and the ELF spec says
// "The link editor does not extract archive members to resolve undefined weak
// symbols" (but, interestingly enough, will resolve undefined weak symbols
// with definitions from archive members that were extracted in order to
// resolve an undefined global (strong) symbol)

# ifndef DMGL_NO_OPTS
# define FOLLY_DEFINED_DMGL 1
# define DMGL_NO_OPTS 0 /* For readability... */
# define DMGL_PARAMS (1 << 0) /* Include function args */
# define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */
# define DMGL_JAVA (1 << 2) /* Demangle as Java rather than C++. */
# define DMGL_VERBOSE (1 << 3) /* Include implementation details. */
# define DMGL_TYPES (1 << 4) /* Also try to demangle type encodings. */
# define DMGL_RET_POSTFIX (1 << 5) /* Print function return types (when
present) after function signature */
# endif

extern "C" int cplus_demangle_v3_callback(
    const char* mangled,
    int options, // We use DMGL_PARAMS | DMGL_TYPES, aka 0x11
    void (*callback)(const char*, size_t, void*),
    void* arg);
#include <cxxabi.h>

#endif

namespace folly {

#if FOLLY_HAVE_CPLUS_DEMANGLE_V3_CALLBACK
#if FOLLY_DETAIL_HAVE_DEMANGLE_H

fbstring demangle(const char* name) {
#ifdef FOLLY_DEMANGLE_MAX_SYMBOL_SIZE
@ -102,7 +74,7 @@ void demangleCallback(const char* str, size_t size, void* p) {
  buf->total += size;
}

} // namespace
} // namespace

size_t demangle(const char* name, char* out, size_t outSize) {
#ifdef FOLLY_DEMANGLE_MAX_SYMBOL_SIZE
@ -119,16 +91,13 @@ size_t demangle(const char* name, char* out, size_t outSize) {

  DemangleBuf dbuf;
  dbuf.dest = out;
  dbuf.remaining = outSize ? outSize - 1 : 0; // leave room for null term
  dbuf.remaining = outSize ? outSize - 1 : 0; // leave room for null term
  dbuf.total = 0;

  // Unlike most library functions, this returns 1 on success and 0 on failure
  int status = cplus_demangle_v3_callback(
      name,
      DMGL_PARAMS | DMGL_ANSI | DMGL_TYPES,
      demangleCallback,
      &dbuf);
  if (status == 0) { // failed, return original
  int status =
      detail::cplus_demangle_v3_callback_wrapper(name, demangleCallback, &dbuf);
  if (status == 0) { // failed, return original
    return folly::strlcpy(out, name, outSize);
  }
  if (outSize != 0) {
@ -152,11 +121,11 @@ size_t demangle(const char* name, char* out, size_t outSize) {
size_t strlcpy(char* dest, const char* const src, size_t size) {
  size_t len = strlen(src);
  if (size != 0) {
    size_t n = std::min(len, size - 1); // always null terminate!
    size_t n = std::min(len, size - 1); // always null terminate!
    memcpy(dest, src, n);
    dest[n] = '\0';
  }
  return len;
}

} // folly
} // namespace folly
4
ios/Pods/Folly/folly/Demangle.h
generated
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2014-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -62,4 +62,4 @@ inline size_t demangle(const std::type_info& type, char* buf, size_t bufSize) {
// glibc doesn't have strlcpy
size_t strlcpy(char* dest, const char* const src, size_t size);

}
} // namespace folly
63
ios/Pods/Folly/folly/DiscriminatedPtr.h
generated
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2011-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -28,13 +28,15 @@

#include <limits>
#include <stdexcept>

#include <glog/logging.h>

#include <folly/Likely.h>
#include <folly/Portability.h>
#include <folly/detail/DiscriminatedPtrDetail.h>

#if !FOLLY_X64 && !FOLLY_A64 && !FOLLY_PPC64
# error "DiscriminatedPtr is x64, arm64 and ppc64 specific code."
#if !FOLLY_X64 && !FOLLY_AARCH64 && !FOLLY_PPC64
#error "DiscriminatedPtr is x64, arm64 and ppc64 specific code."
#endif

namespace folly {
@ -54,15 +56,15 @@ namespace folly {
template <typename... Types>
class DiscriminatedPtr {
  // <, not <=, as our indexes are 1-based (0 means "empty")
  static_assert(sizeof...(Types) < std::numeric_limits<uint16_t>::max(),
                "too many types");
  static_assert(
      sizeof...(Types) < std::numeric_limits<uint16_t>::max(),
      "too many types");

 public:
  /**
   * Create an empty DiscriminatedPtr.
   */
  DiscriminatedPtr() : data_(0) {
  }
  DiscriminatedPtr() : data_(0) {}

  /**
   * Create a DiscriminatedPtr that points to an object of type T.
@ -171,18 +173,22 @@ class DiscriminatedPtr {
  template <typename V>
  typename dptr_detail::VisitorResult<V, Types...>::type apply(V&& visitor) {
    size_t n = index();
    if (n == 0) throw std::invalid_argument("Empty DiscriminatedPtr");
    if (n == 0) {
      throw std::invalid_argument("Empty DiscriminatedPtr");
    }
    return dptr_detail::ApplyVisitor<V, Types...>()(
      n, std::forward<V>(visitor), ptr());
        n, std::forward<V>(visitor), ptr());
  }

  template <typename V>
  typename dptr_detail::ConstVisitorResult<V, Types...>::type apply(V&& visitor)
    const {
  typename dptr_detail::ConstVisitorResult<V, Types...>::type apply(
      V&& visitor) const {
    size_t n = index();
    if (n == 0) throw std::invalid_argument("Empty DiscriminatedPtr");
    if (n == 0) {
      throw std::invalid_argument("Empty DiscriminatedPtr");
    }
    return dptr_detail::ApplyConstVisitor<V, Types...>()(
      n, std::forward<V>(visitor), ptr());
        n, std::forward<V>(visitor), ptr());
  }

 private:
@ -190,11 +196,13 @@ class DiscriminatedPtr {
   * Get the 1-based type index of T in Types.
   */
  template <typename T>
  size_t typeIndex() const {
    return dptr_detail::GetTypeIndex<T, Types...>::value;
  uint16_t typeIndex() const {
    return uint16_t(dptr_detail::GetTypeIndex<T, Types...>::value);
  }

  uint16_t index() const { return data_ >> 48; }
  uint16_t index() const {
    return data_ >> 48;
  }
  void* ptr() const {
    return reinterpret_cast<void*>(data_ & ((1ULL << 48) - 1));
  }
@ -215,4 +223,25 @@ class DiscriminatedPtr {
  uintptr_t data_;
};

} // namespace folly
template <typename Visitor, typename... Args>
decltype(auto) apply_visitor(
    Visitor&& visitor,
    const DiscriminatedPtr<Args...>& variant) {
  return variant.apply(std::forward<Visitor>(visitor));
}

template <typename Visitor, typename... Args>
decltype(auto) apply_visitor(
    Visitor&& visitor,
    DiscriminatedPtr<Args...>& variant) {
  return variant.apply(std::forward<Visitor>(visitor));
}

template <typename Visitor, typename... Args>
decltype(auto) apply_visitor(
    Visitor&& visitor,
    DiscriminatedPtr<Args...>&& variant) {
  return variant.apply(std::forward<Visitor>(visitor));
}

} // namespace folly
254
ios/Pods/Folly/folly/DynamicConverter.h
generated
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2012-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -18,11 +18,23 @@

#pragma once

#include <iterator>
#include <type_traits>

#include <boost/iterator/iterator_adaptor.hpp>
#include <boost/mpl/has_xxx.hpp>

#include <folly/Likely.h>
#include <folly/Optional.h>
#include <folly/Traits.h>
#include <folly/dynamic.h>

namespace folly {
template <typename T> T convertTo(const dynamic&);
template <typename T> dynamic toDynamic(const T&);
}
template <typename T>
T convertTo(const dynamic&);
template <typename T>
dynamic toDynamic(const T&);
} // namespace folly

/**
 * convertTo returns a well-typed representation of the input dynamic.
@ -37,13 +49,6 @@ namespace folly {
 * See docs/DynamicConverter.md for supported types and customization
 */

#include <type_traits>
#include <iterator>
#include <boost/iterator/iterator_adaptor.hpp>
#include <boost/mpl/has_xxx.hpp>
#include <folly/Likely.h>

namespace folly {

///////////////////////////////////////////////////////////////////////////////
@ -51,49 +56,35 @@ namespace folly {

namespace dynamicconverter_detail {

BOOST_MPL_HAS_XXX_TRAIT_DEF(value_type);
BOOST_MPL_HAS_XXX_TRAIT_DEF(iterator);
BOOST_MPL_HAS_XXX_TRAIT_DEF(mapped_type);
BOOST_MPL_HAS_XXX_TRAIT_DEF(value_type)
BOOST_MPL_HAS_XXX_TRAIT_DEF(iterator)
BOOST_MPL_HAS_XXX_TRAIT_DEF(mapped_type)
BOOST_MPL_HAS_XXX_TRAIT_DEF(key_type)

template <typename T> struct iterator_class_is_container {
template <typename T>
struct iterator_class_is_container {
  typedef std::reverse_iterator<typename T::iterator> some_iterator;
  enum { value = has_value_type<T>::value &&
      std::is_constructible<T, some_iterator, some_iterator>::value };
  enum {
    value = has_value_type<T>::value &&
        std::is_constructible<T, some_iterator, some_iterator>::value
  };
};

template <typename T>
using class_is_container = typename
  std::conditional<
    has_iterator<T>::value,
    iterator_class_is_container<T>,
    std::false_type
  >::type;
using class_is_container =
    Conjunction<has_iterator<T>, iterator_class_is_container<T>>;

template <typename T> struct class_is_range {
  enum { value = has_value_type<T>::value &&
                 has_iterator<T>::value };
};
template <typename T>
using is_range = StrictConjunction<has_value_type<T>, has_iterator<T>>;

template <typename T>
using is_container = StrictConjunction<std::is_class<T>, class_is_container<T>>;

template <typename T> struct is_container
  : std::conditional<
      std::is_class<T>::value,
      class_is_container<T>,
      std::false_type
    >::type {};
template <typename T>
using is_map = StrictConjunction<is_range<T>, has_mapped_type<T>>;

template <typename T> struct is_range
  : std::conditional<
      std::is_class<T>::value,
      class_is_range<T>,
      std::false_type
    >::type {};

template <typename T> struct is_map
  : std::integral_constant<
      bool,
      is_range<T>::value && has_mapped_type<T>::value
    > {};
template <typename T>
using is_associative = StrictConjunction<is_range<T>, has_key_type<T>>;

} // namespace dynamicconverter_detail

@ -114,70 +105,67 @@ template <typename T> struct is_map

namespace dynamicconverter_detail {

template<typename T>
template <typename T>
struct Dereferencer {
  static inline void derefToCache(
      T* /* mem */, const dynamic::const_item_iterator& /* it */) {
      Optional<T>* /* mem */,
      const dynamic::const_item_iterator& /* it */) {
    throw TypeError("array", dynamic::Type::OBJECT);
  }

  static inline void derefToCache(T* mem, const dynamic::const_iterator& it) {
    new (mem) T(convertTo<T>(*it));
  static inline void derefToCache(
      Optional<T>* mem,
      const dynamic::const_iterator& it) {
    mem->emplace(convertTo<T>(*it));
  }
};

template<typename F, typename S>
template <typename F, typename S>
struct Dereferencer<std::pair<F, S>> {
  static inline void
  derefToCache(std::pair<F, S>* mem, const dynamic::const_item_iterator& it) {
    new (mem) std::pair<F, S>(
        convertTo<F>(it->first), convertTo<S>(it->second)
    );
  static inline void derefToCache(
      Optional<std::pair<F, S>>* mem,
      const dynamic::const_item_iterator& it) {
    mem->emplace(convertTo<F>(it->first), convertTo<S>(it->second));
  }

  // Intentional duplication of the code in Dereferencer
  template <typename T>
  static inline void derefToCache(T* mem, const dynamic::const_iterator& it) {
    new (mem) T(convertTo<T>(*it));
  static inline void derefToCache(
      Optional<T>* mem,
      const dynamic::const_iterator& it) {
    mem->emplace(convertTo<T>(*it));
  }
};

template <typename T, typename It>
class Transformer : public boost::iterator_adaptor<
                      Transformer<T, It>,
                      It,
                      typename T::value_type
                    > {
class Transformer
    : public boost::
          iterator_adaptor<Transformer<T, It>, It, typename T::value_type> {
  friend class boost::iterator_core_access;

  typedef typename T::value_type ttype;

  mutable ttype cache_;
  mutable bool valid_;
  mutable Optional<ttype> cache_;

  void increment() {
    ++this->base_reference();
    valid_ = false;
    cache_ = none;
  }

  ttype& dereference() const {
    if (LIKELY(!valid_)) {
      cache_.~ttype();
    if (!cache_) {
      Dereferencer<ttype>::derefToCache(&cache_, this->base_reference());
      valid_ = true;
    }
    return cache_;
    return cache_.value();
  }

 public:
  explicit Transformer(const It& it)
      : Transformer::iterator_adaptor_(it), valid_(false) {}
 public:
  explicit Transformer(const It& it) : Transformer::iterator_adaptor_(it) {}
};

// conversion factory
template <typename T, typename It>
inline std::move_iterator<Transformer<T, It>>
conversionIterator(const It& it) {
inline std::move_iterator<Transformer<T, It>> conversionIterator(const It& it) {
  return std::make_move_iterator(Transformer<T, It>(it));
}

@ -192,7 +180,8 @@ conversionIterator(const It& it) {
 */

// default - intentionally unimplemented
template <typename T, typename Enable = void> struct DynamicConverter;
template <typename T, typename Enable = void>
struct DynamicConverter;

// boolean
template <>
@ -204,9 +193,10 @@ struct DynamicConverter<bool> {

// integrals
template <typename T>
struct DynamicConverter<T,
    typename std::enable_if<std::is_integral<T>::value &&
                            !std::is_same<T, bool>::value>::type> {
struct DynamicConverter<
    T,
    typename std::enable_if<
        std::is_integral<T>::value && !std::is_same<T, bool>::value>::type> {
  static T convert(const dynamic& d) {
    return folly::to<T>(d.asInt());
  }
@ -214,8 +204,9 @@ struct DynamicConverter<T,

// enums
template <typename T>
struct DynamicConverter<T,
    typename std::enable_if<std::is_enum<T>::value>::type> {
struct DynamicConverter<
    T,
    typename std::enable_if<std::is_enum<T>::value>::type> {
  static T convert(const dynamic& d) {
    using type = typename std::underlying_type<T>::type;
    return static_cast<T>(DynamicConverter<type>::convert(d));
@ -224,7 +215,8 @@ struct DynamicConverter<T,

// floating point
template <typename T>
struct DynamicConverter<T,
struct DynamicConverter<
    T,
    typename std::enable_if<std::is_floating_point<T>::value>::type> {
  static T convert(const dynamic& d) {
    return folly::to<T>(d.asDouble());
@ -249,7 +241,7 @@ struct DynamicConverter<std::string> {

// std::pair
template <typename F, typename S>
struct DynamicConverter<std::pair<F,S>> {
struct DynamicConverter<std::pair<F, S>> {
  static std::pair<F, S> convert(const dynamic& d) {
    if (d.isArray() && d.size() == 2) {
      return std::make_pair(convertTo<F>(d[0]), convertTo<S>(d[1]));
@ -262,26 +254,53 @@ struct DynamicConverter<std::pair<F,S>> {
  }
};

// containers
// non-associative containers
template <typename C>
struct DynamicConverter<C,
struct DynamicConverter<
    C,
    typename std::enable_if<
        dynamicconverter_detail::is_container<C>::value>::type> {
        dynamicconverter_detail::is_container<C>::value &&
        !dynamicconverter_detail::is_associative<C>::value>::type> {
  static C convert(const dynamic& d) {
    if (d.isArray()) {
      return C(dynamicconverter_detail::conversionIterator<C>(d.begin()),
               dynamicconverter_detail::conversionIterator<C>(d.end()));
      return C(
          dynamicconverter_detail::conversionIterator<C>(d.begin()),
          dynamicconverter_detail::conversionIterator<C>(d.end()));
    } else if (d.isObject()) {
      return C(dynamicconverter_detail::conversionIterator<C>
               (d.items().begin()),
               dynamicconverter_detail::conversionIterator<C>
               (d.items().end()));
      return C(
          dynamicconverter_detail::conversionIterator<C>(d.items().begin()),
          dynamicconverter_detail::conversionIterator<C>(d.items().end()));
    } else {
      throw TypeError("object or array", d.type());
    }
  }
};

// associative containers
template <typename C>
struct DynamicConverter<
    C,
    typename std::enable_if<
        dynamicconverter_detail::is_container<C>::value &&
        dynamicconverter_detail::is_associative<C>::value>::type> {
  static C convert(const dynamic& d) {
    C ret; // avoid direct initialization due to unordered_map's constructor
           // causing memory corruption if the iterator throws an exception
    if (d.isArray()) {
      ret.insert(
          dynamicconverter_detail::conversionIterator<C>(d.begin()),
          dynamicconverter_detail::conversionIterator<C>(d.end()));
    } else if (d.isObject()) {
      ret.insert(
          dynamicconverter_detail::conversionIterator<C>(d.items().begin()),
          dynamicconverter_detail::conversionIterator<C>(d.items().end()));
    } else {
      throw TypeError("object or array", d.type());
    }
    return ret;
  }
};

///////////////////////////////////////////////////////////////////////////////
// DynamicConstructor specializations

@ -298,14 +317,26 @@ struct DynamicConstructor {
  }
};

// identity
template <typename C>
struct DynamicConstructor<
    C,
    typename std::enable_if<std::is_same<C, dynamic>::value>::type> {
  static dynamic construct(const C& x) {
    return x;
  }
};

// maps
template<typename C>
struct DynamicConstructor<C,
template <typename C>
struct DynamicConstructor<
    C,
    typename std::enable_if<
        dynamicconverter_detail::is_map<C>::value>::type> {
        !std::is_same<C, dynamic>::value &&
        dynamicconverter_detail::is_map<C>::value>::type> {
  static dynamic construct(const C& x) {
    dynamic d = dynamic::object;
    for (auto& pair : x) {
    for (const auto& pair : x) {
      d.insert(toDynamic(pair.first), toDynamic(pair.second));
    }
    return d;
@ -313,15 +344,17 @@ struct DynamicConstructor<C,
};

// other ranges
template<typename C>
struct DynamicConstructor<C,
template <typename C>
struct DynamicConstructor<
    C,
    typename std::enable_if<
        !dynamicconverter_detail::is_map<C>::value &&
        !std::is_constructible<StringPiece, const C&>::value &&
        dynamicconverter_detail::is_range<C>::value>::type> {
        !std::is_same<C, dynamic>::value &&
        !dynamicconverter_detail::is_map<C>::value &&
        !std::is_constructible<StringPiece, const C&>::value &&
        dynamicconverter_detail::is_range<C>::value>::type> {
  static dynamic construct(const C& x) {
    dynamic d = dynamic::array;
    for (auto& item : x) {
    for (const auto& item : x) {
      d.push_back(toDynamic(item));
    }
    return d;
@ -329,7 +362,7 @@ struct DynamicConstructor<C,
};

// pair
template<typename A, typename B>
template <typename A, typename B>
struct DynamicConstructor<std::pair<A, B>, void> {
  static dynamic construct(const std::pair<A, B>& x) {
    dynamic d = dynamic::array;
@ -339,6 +372,21 @@ struct DynamicConstructor<std::pair<A, B>, void> {
  }
};

// vector<bool>
template <>
struct DynamicConstructor<std::vector<bool>, void> {
  static dynamic construct(const std::vector<bool>& x) {
    dynamic d = dynamic::array;
    // Intentionally specifying the type as bool here.
    // std::vector<bool>'s iterators return a proxy which is a prvalue
    // and hence cannot bind to an lvalue reference such as auto&
    for (bool item : x) {
      d.push_back(toDynamic(item));
    }
    return d;
  }
};

///////////////////////////////////////////////////////////////////////////////
// implementation

@ -347,7 +395,7 @@ T convertTo(const dynamic& d) {
  return DynamicConverter<typename std::remove_cv<T>::type>::convert(d);
}

template<typename T>
template <typename T>
dynamic toDynamic(const T& x) {
  return DynamicConstructor<typename std::remove_cv<T>::type>::construct(x);
}
52
ios/Pods/Folly/folly/Exception.h
generated
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2013-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -35,15 +35,40 @@ namespace folly {
//
// The *Explicit functions take an explicit value for errno.

inline std::system_error makeSystemErrorExplicit(int err, const char* msg) {
  // TODO: The C++ standard indicates that std::generic_category() should be
  // used for POSIX errno codes.
  //
  // We should ideally change this to use std::generic_category() instead of
  // std::system_category(). However, undertaking this change will require
  // updating existing call sites that currently catch exceptions thrown by
  // this code and currently expect std::system_category.
  return std::system_error(err, std::system_category(), msg);
}

template <class... Args>
std::system_error makeSystemErrorExplicit(int err, Args&&... args) {
  return makeSystemErrorExplicit(
      err, to<fbstring>(std::forward<Args>(args)...).c_str());
}

inline std::system_error makeSystemError(const char* msg) {
  return makeSystemErrorExplicit(errno, msg);
}

template <class... Args>
std::system_error makeSystemError(Args&&... args) {
  return makeSystemErrorExplicit(errno, std::forward<Args>(args)...);
}

// Helper to throw std::system_error
[[noreturn]] inline void throwSystemErrorExplicit(int err, const char* msg) {
  throw std::system_error(err, std::system_category(), msg);
  throw makeSystemErrorExplicit(err, msg);
}

template <class... Args>
[[noreturn]] void throwSystemErrorExplicit(int err, Args&&... args) {
  throwSystemErrorExplicit(
      err, to<fbstring>(std::forward<Args>(args)...).c_str());
  throw makeSystemErrorExplicit(err, std::forward<Args>(args)...);
}

// Helper to throw std::system_error from errno and components of a string
@ -66,7 +91,7 @@ void checkPosixError(int err, Args&&... args) {
template <class... Args>
void checkKernelError(ssize_t ret, Args&&... args) {
  if (UNLIKELY(ret < 0)) {
    throwSystemErrorExplicit(-ret, std::forward<Args>(args)...);
    throwSystemErrorExplicit(int(-ret), std::forward<Args>(args)...);
  }
}

@ -103,18 +128,15 @@ void checkFopenErrorExplicit(FILE* fp, int savedErrno, Args&&... args) {
  }
}

template <typename E, typename V, typename... Args>
void throwOnFail(V&& value, Args&&... args) {
  if (!value) {
    throw E(std::forward<Args>(args)...);
  }
}

/**
 * If cond is not true, raise an exception of type E. E must have a ctor that
 * works with const char* (a description of the failure).
 */
#define CHECK_THROW(cond, E) \
  ::folly::throwOnFail<E>((cond), "Check failed: " #cond)
#define CHECK_THROW(cond, E) \
  do { \
    if (!(cond)) { \
      throw E("Check failed: " #cond); \
    } \
  } while (0)

} // namespace folly
} // namespace folly
5
ios/Pods/Folly/folly/ExceptionString.h
generated
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2016-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -62,7 +62,8 @@ auto exceptionStr(const E& e) -> typename std::
#ifdef FOLLY_HAS_RTTI
  return demangle(typeid(e));
#else
  return "Exception (no RTTI available)";
  (void)e;
  return "Exception (no RTTI available) ";
#endif
}
677
ios/Pods/Folly/folly/ExceptionWrapper-inl.h
generated
Normal file
@ -0,0 +1,677 @@
|
||||
/*
|
||||
* Copyright 2017-present Facebook, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
/*
|
||||
*
|
||||
* Author: Eric Niebler <eniebler@fb.com>
|
||||
*/
|
||||
|
||||
#include <folly/Portability.h>
|
||||
|
||||
namespace folly {
|
||||
|
||||
template <class Fn>
|
||||
struct exception_wrapper::arg_type_
|
||||
: public arg_type_<decltype(&Fn::operator())> {};
|
||||
template <class Ret, class Class, class Arg>
|
||||
struct exception_wrapper::arg_type_<Ret (Class::*)(Arg)> {
|
||||
using type = Arg;
|
||||
};
|
||||
template <class Ret, class Class, class Arg>
|
||||
struct exception_wrapper::arg_type_<Ret (Class::*)(Arg) const> {
|
||||
using type = Arg;
|
||||
};
|
||||
template <class Ret, class Arg>
|
||||
struct exception_wrapper::arg_type_<Ret(Arg)> {
|
||||
using type = Arg;
|
||||
};
|
||||
template <class Ret, class Arg>
|
||||
struct exception_wrapper::arg_type_<Ret (*)(Arg)> {
|
||||
using type = Arg;
|
||||
};
|
||||
template <class Ret, class Class>
|
||||
struct exception_wrapper::arg_type_<Ret (Class::*)(...)> {
|
||||
using type = AnyException;
|
||||
};
|
||||
template <class Ret, class Class>
|
||||
struct exception_wrapper::arg_type_<Ret (Class::*)(...) const> {
|
||||
using type = AnyException;
|
||||
};
|
||||
template <class Ret>
|
||||
struct exception_wrapper::arg_type_<Ret(...)> {
|
||||
using type = AnyException;
|
||||
};
|
||||
template <class Ret>
|
||||
struct exception_wrapper::arg_type_<Ret (*)(...)> {
|
||||
using type = AnyException;
|
||||
};
|
||||
|
||||
template <class Ret, class... Args>
|
||||
inline Ret exception_wrapper::noop_(Args...) {
|
||||
return Ret();
|
||||
}
|
||||
|
||||
inline std::type_info const* exception_wrapper::uninit_type_(
|
||||
exception_wrapper const*) {
|
||||
return &typeid(void);
|
||||
}
|
||||
|
||||
template <class Ex, typename... As>
|
||||
inline exception_wrapper::Buffer::Buffer(in_place_type_t<Ex>, As&&... as_) {
|
||||
::new (static_cast<void*>(&buff_)) Ex(std::forward<As>(as_)...);
|
||||
}
|
||||
|
||||
template <class Ex>
|
||||
inline Ex& exception_wrapper::Buffer::as() noexcept {
|
||||
return *static_cast<Ex*>(static_cast<void*>(&buff_));
|
||||
}
|
||||
template <class Ex>
|
||||
inline Ex const& exception_wrapper::Buffer::as() const noexcept {
|
||||
return *static_cast<Ex const*>(static_cast<void const*>(&buff_));
|
||||
}
|
||||
|
||||
inline std::exception const* exception_wrapper::as_exception_or_null_(
|
||||
std::exception const& ex) {
|
||||
return &ex;
|
||||
}
|
||||
inline std::exception const* exception_wrapper::as_exception_or_null_(
|
||||
AnyException) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
static_assert(
|
||||
!kMicrosoftAbiVer || (kMicrosoftAbiVer >= 1900 && kMicrosoftAbiVer <= 2000),
|
||||
"exception_wrapper is untested and possibly broken on your version of "
|
||||
"MSVC");
|
||||
|
||||
inline std::uintptr_t exception_wrapper::ExceptionPtr::as_int_(
|
||||
std::exception_ptr const& ptr,
|
||||
std::exception const& e) noexcept {
|
||||
if (!kMicrosoftAbiVer) {
|
||||
return reinterpret_cast<std::uintptr_t>(&e);
|
||||
} else {
|
||||
// On Windows, as of MSVC2017, all thrown exceptions are copied to the stack
|
||||
// first. Thus, we cannot depend on exception references associated with an
|
||||
// exception_ptr to be live for the duration of the exception_ptr. We need
|
||||
// to directly access the heap allocated memory inside the exception_ptr.
|
||||
//
|
||||
// std::exception_ptr is an opaque reinterpret_cast of
|
||||
// std::shared_ptr<__ExceptionPtr>
|
||||
// __ExceptionPtr is a non-virtual class with two members, a union and a
|
||||
// bool. The union contains the now-undocumented EHExceptionRecord, which
|
||||
// contains a struct which contains a void* which points to the heap
|
||||
// allocated exception.
|
||||
// We derive the offset to pExceptionObject via manual means.
|
||||
FOLLY_PACK_PUSH
|
||||
struct Win32ExceptionPtr {
|
||||
char offset[8 + 4 * sizeof(void*)];
|
||||
void* exceptionObject;
|
||||
} FOLLY_PACK_ATTR;
|
||||
FOLLY_PACK_POP
|
||||
|
||||
auto* win32ExceptionPtr =
|
||||
reinterpret_cast<std::shared_ptr<Win32ExceptionPtr> const*>(&ptr)
|
||||
->get();
|
||||
return reinterpret_cast<std::uintptr_t>(win32ExceptionPtr->exceptionObject);
|
||||
}
|
||||
}
|
||||
inline std::uintptr_t exception_wrapper::ExceptionPtr::as_int_(
|
||||
std::exception_ptr const&,
|
||||
AnyException e) noexcept {
|
||||
return reinterpret_cast<std::uintptr_t>(e.typeinfo_) + 1;
|
||||
}
|
||||
inline bool exception_wrapper::ExceptionPtr::has_exception_() const {
|
||||
return 0 == exception_or_type_ % 2;
|
||||
}
|
||||
inline std::exception const* exception_wrapper::ExceptionPtr::as_exception_()
|
||||
const {
|
||||
return reinterpret_cast<std::exception const*>(exception_or_type_);
|
||||
}
|
||||
inline std::type_info const* exception_wrapper::ExceptionPtr::as_type_() const {
|
||||
return reinterpret_cast<std::type_info const*>(exception_or_type_ - 1);
|
||||
}
|
||||
|
||||
inline void exception_wrapper::ExceptionPtr::copy_(
|
||||
exception_wrapper const* from,
|
||||
exception_wrapper* to) {
|
||||
::new (static_cast<void*>(&to->eptr_)) ExceptionPtr(from->eptr_);
|
||||
}
|
||||
inline void exception_wrapper::ExceptionPtr::move_(
|
||||
exception_wrapper* from,
|
||||
exception_wrapper* to) {
|
||||
::new (static_cast<void*>(&to->eptr_)) ExceptionPtr(std::move(from->eptr_));
|
||||
delete_(from);
|
||||
}
|
||||
inline void exception_wrapper::ExceptionPtr::delete_(exception_wrapper* that) {
|
||||
that->eptr_.~ExceptionPtr();
|
||||
that->vptr_ = &uninit_;
|
||||
}
|
||||
[[noreturn]] inline void exception_wrapper::ExceptionPtr::throw_(
|
||||
exception_wrapper const* that) {
|
||||
std::rethrow_exception(that->eptr_.ptr_);
|
||||
}
|
||||
inline std::type_info const* exception_wrapper::ExceptionPtr::type_(
|
||||
exception_wrapper const* that) {
|
||||
if (auto e = get_exception_(that)) {
|
||||
return &typeid(*e);
|
||||
}
|
||||
return that->eptr_.as_type_();
|
||||
}
|
||||
inline std::exception const* exception_wrapper::ExceptionPtr::get_exception_(
|
||||
exception_wrapper const* that) {
|
||||
return that->eptr_.has_exception_() ? that->eptr_.as_exception_() : nullptr;
|
||||
}
|
||||
inline exception_wrapper exception_wrapper::ExceptionPtr::get_exception_ptr_(
|
||||
exception_wrapper const* that) {
|
||||
return *that;
|
||||
}
|
||||
|
||||
template <class Ex>
|
||||
inline void exception_wrapper::InPlace<Ex>::copy_(
|
||||
exception_wrapper const* from,
|
||||
exception_wrapper* to) {
|
||||
::new (static_cast<void*>(std::addressof(to->buff_.as<Ex>())))
|
||||
Ex(from->buff_.as<Ex>());
|
||||
}
|
||||
template <class Ex>
|
||||
inline void exception_wrapper::InPlace<Ex>::move_(
|
||||
exception_wrapper* from,
|
||||
exception_wrapper* to) {
|
||||
::new (static_cast<void*>(std::addressof(to->buff_.as<Ex>())))
|
||||
Ex(std::move(from->buff_.as<Ex>()));
|
||||
delete_(from);
|
||||
}
|
||||
template <class Ex>
|
||||
inline void exception_wrapper::InPlace<Ex>::delete_(exception_wrapper* that) {
|
||||
that->buff_.as<Ex>().~Ex();
|
||||
that->vptr_ = &uninit_;
|
||||
}
|
||||
template <class Ex>
|
||||
[[noreturn]] inline void exception_wrapper::InPlace<Ex>::throw_(
|
||||
exception_wrapper const* that) {
|
||||
throw that->buff_.as<Ex>(); // @nolint
|
||||
}
|
||||
template <class Ex>
|
||||
inline std::type_info const* exception_wrapper::InPlace<Ex>::type_(
|
||||
exception_wrapper const*) {
|
||||
return &typeid(Ex);
|
||||
}
|
||||
template <class Ex>
|
||||
inline std::exception const* exception_wrapper::InPlace<Ex>::get_exception_(
|
||||
exception_wrapper const* that) {
|
||||
return as_exception_or_null_(that->buff_.as<Ex>());
|
||||
}
|
||||
template <class Ex>
|
||||
inline exception_wrapper exception_wrapper::InPlace<Ex>::get_exception_ptr_(
|
||||
exception_wrapper const* that) {
|
||||
try {
|
||||
throw_(that);
|
||||
} catch (Ex const& ex) {
|
||||
return exception_wrapper{std::current_exception(), ex};
|
||||
}
|
||||
}
|
||||
|
||||
template <class Ex>
|
||||
[[noreturn]] inline void exception_wrapper::SharedPtr::Impl<Ex>::throw_()
|
||||
const {
|
||||
throw ex_; // @nolint
|
||||
}
|
||||
template <class Ex>
|
||||
inline std::exception const*
|
||||
exception_wrapper::SharedPtr::Impl<Ex>::get_exception_() const noexcept {
|
||||
return as_exception_or_null_(ex_);
|
||||
}
|
||||
template <class Ex>
|
||||
inline exception_wrapper
|
||||
exception_wrapper::SharedPtr::Impl<Ex>::get_exception_ptr_() const noexcept {
|
||||
try {
|
||||
throw_();
|
||||
} catch (Ex& ex) {
|
||||
return exception_wrapper{std::current_exception(), ex};
|
||||
}
|
||||
}
|
||||
inline void exception_wrapper::SharedPtr::copy_(
|
||||
exception_wrapper const* from,
|
||||
exception_wrapper* to) {
|
||||
::new (static_cast<void*>(std::addressof(to->sptr_))) SharedPtr(from->sptr_);
|
||||
}
|
||||
inline void exception_wrapper::SharedPtr::move_(
|
||||
exception_wrapper* from,
|
||||
exception_wrapper* to) {
|
||||
::new (static_cast<void*>(std::addressof(to->sptr_)))
|
||||
SharedPtr(std::move(from->sptr_));
|
||||
delete_(from);
|
||||
}
|
||||
inline void exception_wrapper::SharedPtr::delete_(exception_wrapper* that) {
|
||||
that->sptr_.~SharedPtr();
|
||||
that->vptr_ = &uninit_;
|
||||
}
|
||||
[[noreturn]] inline void exception_wrapper::SharedPtr::throw_(
|
||||
exception_wrapper const* that) {
|
||||
that->sptr_.ptr_->throw_();
|
||||
folly::assume_unreachable();
|
||||
}
|
||||
inline std::type_info const* exception_wrapper::SharedPtr::type_(
|
||||
exception_wrapper const* that) {
|
||||
return that->sptr_.ptr_->info_;
|
||||
}
|
||||
inline std::exception const* exception_wrapper::SharedPtr::get_exception_(
|
||||
exception_wrapper const* that) {
|
||||
return that->sptr_.ptr_->get_exception_();
|
||||
}
|
||||
inline exception_wrapper exception_wrapper::SharedPtr::get_exception_ptr_(
|
||||
exception_wrapper const* that) {
|
||||
return that->sptr_.ptr_->get_exception_ptr_();
|
||||
}
|
||||
|
||||
template <class Ex, typename... As>
|
||||
inline exception_wrapper::exception_wrapper(
|
||||
ThrownTag,
|
||||
in_place_type_t<Ex>,
|
||||
As&&... as)
|
||||
: eptr_{std::make_exception_ptr(Ex(std::forward<As>(as)...)),
|
||||
reinterpret_cast<std::uintptr_t>(std::addressof(typeid(Ex))) + 1u},
|
||||
vptr_(&ExceptionPtr::ops_) {}
|
||||
|
||||
template <class Ex, typename... As>
|
||||
inline exception_wrapper::exception_wrapper(
|
||||
OnHeapTag,
|
||||
in_place_type_t<Ex>,
|
||||
As&&... as)
|
||||
: sptr_{std::make_shared<SharedPtr::Impl<Ex>>(std::forward<As>(as)...)},
|
||||
vptr_(&SharedPtr::ops_) {}
|
||||
|
||||
template <class Ex, typename... As>
|
||||
inline exception_wrapper::exception_wrapper(
|
||||
InSituTag,
|
||||
in_place_type_t<Ex>,
|
||||
As&&... as)
|
||||
: buff_{in_place_type<Ex>, std::forward<As>(as)...},
|
||||
vptr_(&InPlace<Ex>::ops_) {}
|
||||
|
||||
inline exception_wrapper::exception_wrapper(exception_wrapper&& that) noexcept
|
||||
: exception_wrapper{} {
|
||||
(vptr_ = that.vptr_)->move_(&that, this); // Move into *this, won't throw
|
||||
}
|
||||
|
||||
inline exception_wrapper::exception_wrapper(
|
||||
exception_wrapper const& that) noexcept
|
||||
: exception_wrapper{} {
|
||||
that.vptr_->copy_(&that, this); // Copy into *this, won't throw
|
||||
vptr_ = that.vptr_;
|
||||
}
|
||||
|
||||
// If `this == &that`, this move assignment operator leaves the object in a
|
||||
// valid but unspecified state.
|
||||
inline exception_wrapper& exception_wrapper::operator=(
|
||||
exception_wrapper&& that) noexcept {
|
||||
vptr_->delete_(this); // Free the current exception
|
||||
(vptr_ = that.vptr_)->move_(&that, this); // Move into *this, won't throw
|
||||
return *this;
|
||||
}
|
||||
|
||||
inline exception_wrapper& exception_wrapper::operator=(
|
||||
exception_wrapper const& that) noexcept {
|
||||
exception_wrapper(that).swap(*this);
|
||||
return *this;
|
||||
}
|
||||
|
||||
inline exception_wrapper::~exception_wrapper() {
|
||||
reset();
|
||||
}
|
||||
|
||||
template <class Ex>
|
||||
inline exception_wrapper::exception_wrapper(
|
||||
std::exception_ptr ptr,
|
||||
Ex& ex) noexcept
|
||||
: eptr_{ptr, ExceptionPtr::as_int_(ptr, ex)}, vptr_(&ExceptionPtr::ops_) {
|
||||
assert(eptr_.ptr_);
|
||||
}
|
||||
|
||||
namespace exception_wrapper_detail {
|
||||
template <class Ex>
|
||||
Ex&& dont_slice(Ex&& ex) {
|
||||
assert(typeid(ex) == typeid(_t<std::decay<Ex>>) ||
|
||||
!"Dynamic and static exception types don't match. Exception would "
|
||||
"be sliced when storing in exception_wrapper.");
|
||||
return std::forward<Ex>(ex);
|
||||
}
|
||||
} // namespace exception_wrapper_detail
|
||||
|
||||
template <
|
||||
class Ex,
|
||||
class Ex_,
|
||||
FOLLY_REQUIRES_DEF(Conjunction<
|
||||
exception_wrapper::IsStdException<Ex_>,
|
||||
exception_wrapper::IsRegularExceptionType<Ex_>>::value)>
|
||||
inline exception_wrapper::exception_wrapper(Ex&& ex)
|
||||
: exception_wrapper{
|
||||
PlacementOf<Ex_>{},
|
||||
in_place_type<Ex_>,
|
||||
exception_wrapper_detail::dont_slice(std::forward<Ex>(ex))} {}
|
||||
|
||||
template <
|
||||
class Ex,
|
||||
class Ex_,
|
||||
FOLLY_REQUIRES_DEF(exception_wrapper::IsRegularExceptionType<Ex_>::value)>
|
||||
inline exception_wrapper::exception_wrapper(in_place_t, Ex&& ex)
|
||||
: exception_wrapper{
|
||||
PlacementOf<Ex_>{},
|
||||
in_place_type<Ex_>,
|
||||
exception_wrapper_detail::dont_slice(std::forward<Ex>(ex))} {}
|
||||
|
||||
template <
|
||||
class Ex,
|
||||
typename... As,
|
||||
FOLLY_REQUIRES_DEF(exception_wrapper::IsRegularExceptionType<Ex>::value)>
|
||||
inline exception_wrapper::exception_wrapper(in_place_type_t<Ex>, As&&... as)
|
||||
: exception_wrapper{PlacementOf<Ex>{},
|
||||
in_place_type<Ex>,
|
||||
std::forward<As>(as)...} {}
|
||||
|
||||
inline void exception_wrapper::swap(exception_wrapper& that) noexcept {
|
||||
exception_wrapper tmp(std::move(that));
|
||||
that = std::move(*this);
|
||||
*this = std::move(tmp);
|
||||
}
|
||||
|
||||
inline exception_wrapper::operator bool() const noexcept {
|
||||
return vptr_ != &uninit_;
|
||||
}
|
||||
|
||||
inline bool exception_wrapper::operator!() const noexcept {
|
||||
return !static_cast<bool>(*this);
|
||||
}
|
||||
|
||||
inline void exception_wrapper::reset() {
|
||||
vptr_->delete_(this);
|
||||
}
|
||||
|
||||
inline bool exception_wrapper::has_exception_ptr() const noexcept {
|
||||
return vptr_ == &ExceptionPtr::ops_;
|
||||
}
|
||||
|
||||
inline std::exception* exception_wrapper::get_exception() noexcept {
|
||||
return const_cast<std::exception*>(vptr_->get_exception_(this));
|
||||
}
|
||||
inline std::exception const* exception_wrapper::get_exception() const noexcept {
|
||||
return vptr_->get_exception_(this);
|
||||
}
|
||||
|
||||
template <typename Ex>
|
||||
inline Ex* exception_wrapper::get_exception() noexcept {
|
||||
Ex* object{nullptr};
|
||||
with_exception([&](Ex& ex) { object = &ex; });
|
||||
return object;
|
||||
}
|
||||
|
||||
template <typename Ex>
|
||||
inline Ex const* exception_wrapper::get_exception() const noexcept {
|
||||
Ex const* object{nullptr};
|
||||
with_exception([&](Ex const& ex) { object = &ex; });
|
||||
return object;
|
||||
}
|
||||
|
||||
inline std::exception_ptr const&
|
||||
exception_wrapper::to_exception_ptr() noexcept {
|
||||
// Computing an exception_ptr is expensive so cache the result.
|
||||
return (*this = vptr_->get_exception_ptr_(this)).eptr_.ptr_;
|
||||
}
|
||||
inline std::exception_ptr exception_wrapper::to_exception_ptr() const noexcept {
|
||||
return vptr_->get_exception_ptr_(this).eptr_.ptr_;
|
||||
}
|
||||
|
||||
inline std::type_info const& exception_wrapper::none() noexcept {
|
||||
return typeid(void);
|
||||
}
|
||||
inline std::type_info const& exception_wrapper::unknown() noexcept {
|
||||
return typeid(Unknown);
|
||||
}
|
||||
|
||||
inline std::type_info const& exception_wrapper::type() const noexcept {
|
||||
return *vptr_->type_(this);
|
||||
}
|
||||
|
||||
inline folly::fbstring exception_wrapper::what() const {
|
||||
if (auto e = get_exception()) {
|
||||
return class_name() + ": " + e->what();
|
||||
}
|
||||
return class_name();
|
||||
}
|
||||
|
||||
inline folly::fbstring exception_wrapper::class_name() const {
|
||||
auto& ti = type();
|
||||
return ti == none()
|
||||
? ""
|
||||
: ti == unknown() ? "<unknown exception>" : folly::demangle(ti);
|
||||
}
|
||||
|
||||
template <class Ex>
|
||||
inline bool exception_wrapper::is_compatible_with() const noexcept {
|
||||
return with_exception([](Ex const&) {});
|
||||
}
|
||||
|
||||
[[noreturn]] inline void exception_wrapper::throw_exception() const {
|
||||
vptr_->throw_(this);
|
||||
onNoExceptionError(__func__);
|
||||
}
|
||||
|
||||
template <class Ex>
|
||||
[[noreturn]] inline void exception_wrapper::throw_with_nested(Ex&& ex) const {
|
||||
try {
|
||||
throw_exception();
|
||||
} catch (...) {
|
||||
std::throw_with_nested(std::forward<Ex>(ex));
|
||||
}
|
||||
}
|
||||
|
||||
template <class CatchFn, bool IsConst>
|
||||
struct exception_wrapper::ExceptionTypeOf {
|
||||
using type = arg_type<_t<std::decay<CatchFn>>>;
|
||||
static_assert(
|
||||
std::is_reference<type>::value,
|
||||
"Always catch exceptions by reference.");
|
||||
static_assert(
|
||||
!IsConst || std::is_const<_t<std::remove_reference<type>>>::value,
|
||||
"handle() or with_exception() called on a const exception_wrapper "
|
||||
"and asked to catch a non-const exception. Handler will never fire. "
|
||||
"Catch exception by const reference to fix this.");
|
||||
};
|
||||
|
||||
// Nests a throw in the proper try/catch blocks
|
||||
template <bool IsConst>
|
||||
struct exception_wrapper::HandleReduce {
|
||||
bool* handled_;
|
||||
|
||||
template <
|
||||
class ThrowFn,
|
||||
class CatchFn,
|
||||
FOLLY_REQUIRES(!IsCatchAll<CatchFn>::value)>
|
||||
auto operator()(ThrowFn&& th, CatchFn& ca) const {
|
||||
using Ex = _t<ExceptionTypeOf<CatchFn, IsConst>>;
|
||||
return [th = std::forward<ThrowFn>(th), &ca, handled_ = handled_] {
|
||||
try {
|
||||
th();
|
||||
} catch (Ex& e) {
|
||||
// If we got here because a catch function threw, rethrow.
|
||||
if (*handled_) {
|
||||
throw;
|
||||
}
|
||||
*handled_ = true;
|
||||
ca(e);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
template <
|
||||
class ThrowFn,
|
||||
class CatchFn,
|
||||
FOLLY_REQUIRES(IsCatchAll<CatchFn>::value)>
|
||||
auto operator()(ThrowFn&& th, CatchFn& ca) const {
|
||||
return [th = std::forward<ThrowFn>(th), &ca, handled_ = handled_] {
|
||||
try {
|
||||
th();
|
||||
} catch (...) {
|
||||
// If we got here because a catch function threw, rethrow.
|
||||
if (*handled_) {
|
||||
throw;
|
||||
}
|
||||
*handled_ = true;
|
||||
ca();
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
// When all the handlers expect types derived from std::exception, we can
|
||||
// sometimes invoke the handlers without throwing any exceptions.
|
||||
template <bool IsConst>
|
||||
struct exception_wrapper::HandleStdExceptReduce {
|
||||
using StdEx = AddConstIf<IsConst, std::exception>;
|
||||
|
||||
template <
|
||||
class ThrowFn,
|
||||
class CatchFn,
|
||||
FOLLY_REQUIRES(!IsCatchAll<CatchFn>::value)>
|
||||
  auto operator()(ThrowFn&& th, CatchFn& ca) const {
    using Ex = _t<ExceptionTypeOf<CatchFn, IsConst>>;
    return
        [th = std::forward<ThrowFn>(th), &ca](auto&& continuation) -> StdEx* {
          if (auto e = const_cast<StdEx*>(th(continuation))) {
            if (auto e2 = dynamic_cast<_t<std::add_pointer<Ex>>>(e)) {
              ca(*e2);
            } else {
              return e;
            }
          }
          return nullptr;
        };
  }

  template <
      class ThrowFn,
      class CatchFn,
      FOLLY_REQUIRES(IsCatchAll<CatchFn>::value)>
  auto operator()(ThrowFn&& th, CatchFn& ca) const {
    return [th = std::forward<ThrowFn>(th), &ca](auto &&) -> StdEx* {
      // The following continuation causes ca() to execute if *this contains
      // an exception /not/ derived from std::exception.
      auto continuation = [&ca](StdEx* e) {
        return e != nullptr ? e : ((void)ca(), nullptr);
      };
      if (th(continuation) != nullptr) {
        ca();
      }
      return nullptr;
    };
  }
};

// Called when some types in the catch clauses are not derived from
// std::exception.
template <class This, class... CatchFns>
inline void
exception_wrapper::handle_(std::false_type, This& this_, CatchFns&... fns) {
  bool handled = false;
  auto impl = exception_wrapper_detail::fold(
      HandleReduce<std::is_const<This>::value>{&handled},
      [&] { this_.throw_exception(); },
      fns...);
  impl();
}

// Called when all types in the catch clauses are either derived from
// std::exception or a catch-all clause.
template <class This, class... CatchFns>
inline void
exception_wrapper::handle_(std::true_type, This& this_, CatchFns&... fns) {
  using StdEx = exception_wrapper_detail::
      AddConstIf<std::is_const<This>::value, std::exception>;
  auto impl = exception_wrapper_detail::fold(
      HandleStdExceptReduce<std::is_const<This>::value>{},
      [&](auto&& continuation) {
        return continuation(
            const_cast<StdEx*>(this_.vptr_->get_exception_(&this_)));
      },
      fns...);
  // This continuation gets evaluated if CatchFns... does not include a
  // catch-all handler. It is a no-op.
  auto continuation = [](StdEx* ex) { return ex; };
  if (nullptr != impl(continuation)) {
    this_.throw_exception();
  }
}

namespace exception_wrapper_detail {
template <class Ex, class Fn>
struct catch_fn {
  Fn fn_;
  auto operator()(Ex& ex) {
    return fn_(ex);
  }
};

template <class Ex, class Fn>
inline catch_fn<Ex, Fn> catch_(Ex*, Fn fn) {
  return {std::move(fn)};
}
template <class Fn>
inline Fn catch_(void const*, Fn fn) {
  return fn;
}
} // namespace exception_wrapper_detail

template <class Ex, class This, class Fn>
inline bool exception_wrapper::with_exception_(This& this_, Fn fn_) {
  if (!this_) {
    return false;
  }
  bool handled = true;
  auto fn = exception_wrapper_detail::catch_(
      static_cast<Ex*>(nullptr), std::move(fn_));
  auto&& all = [&](...) { handled = false; };
  handle_(IsStdException<arg_type<decltype(fn)>>{}, this_, fn, all);
  return handled;
}

template <class Ex, class Fn>
inline bool exception_wrapper::with_exception(Fn fn) {
  return with_exception_<Ex>(*this, std::move(fn));
}
template <class Ex, class Fn>
inline bool exception_wrapper::with_exception(Fn fn) const {
  return with_exception_<Ex const>(*this, std::move(fn));
}

template <class... CatchFns>
inline void exception_wrapper::handle(CatchFns... fns) {
  using AllStdEx =
      exception_wrapper_detail::AllOf<IsStdException, arg_type<CatchFns>...>;
  if (!*this) {
    onNoExceptionError(__func__);
  }
  this->handle_(AllStdEx{}, *this, fns...);
}
template <class... CatchFns>
inline void exception_wrapper::handle(CatchFns... fns) const {
  using AllStdEx =
      exception_wrapper_detail::AllOf<IsStdException, arg_type<CatchFns>...>;
  if (!*this) {
    onNoExceptionError(__func__);
  }
  this->handle_(AllStdEx{}, *this, fns...);
}

} // namespace folly
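For orientation, a minimal sketch of how the handle() machinery above is used (illustrative only, not part of this diff; the messages are made up):

#include <folly/ExceptionWrapper.h>
#include <iostream>
#include <stdexcept>

void demo() {
  folly::exception_wrapper ew{std::runtime_error("oops")};
  // Clauses taking std::exception-derived types dispatch without rethrowing;
  // a (...) clause is the catch-all case handled by IsCatchAll above.
  ew.handle(
      [](const std::logic_error& e) { std::cout << "logic: " << e.what(); },
      [](const std::runtime_error& e) { std::cout << "runtime: " << e.what(); },
      [](...) { std::cout << "unknown exception"; });
}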
1086 ios/Pods/Folly/folly/ExceptionWrapper.h generated
File diff suppressed because it is too large
189 ios/Pods/Folly/folly/Executor.h generated
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2014-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -16,12 +16,11 @@

#pragma once

#include <atomic>
#include <cassert>
#include <climits>
#include <functional>
#include <stdexcept>

#include <folly/Function.h>
#include <folly/Utility.h>

namespace folly {

@ -31,7 +30,8 @@ using Func = Function<void()>;
/// threadsafe.
class Executor {
 public:
  virtual ~Executor() = default;
  // Workaround for a linkage problem with explicitly defaulted dtor t22914621
  virtual ~Executor() {}

  /// Enqueue a function to be executed by this executor. This and all
  /// variants must be threadsafe.
@ -39,28 +39,177 @@ class Executor {

  /// Enqueue a function with a given priority, where 0 is the medium priority
  /// This is up to the implementation to enforce
  virtual void addWithPriority(Func, int8_t /*priority*/) {
    throw std::runtime_error(
        "addWithPriority() is not implemented for this Executor");
  }
  virtual void addWithPriority(Func, int8_t priority);

  virtual uint8_t getNumPriorities() const {
    return 1;
  }

  static const int8_t LO_PRI = SCHAR_MIN;
  static const int8_t LO_PRI = SCHAR_MIN;
  static const int8_t MID_PRI = 0;
  static const int8_t HI_PRI = SCHAR_MAX;
  static const int8_t HI_PRI = SCHAR_MAX;

  /// A convenience function for shared_ptr to legacy functors.
  ///
  /// Sometimes you have a functor that is move-only, and therefore can't be
  /// converted to a std::function (e.g. std::packaged_task). In that case,
  /// wrap it in a shared_ptr (or maybe folly::MoveWrapper) and use this.
  template <class P>
  void addPtr(P fn) {
    this->add([fn]() mutable { (*fn)(); });
  template <typename ExecutorT = Executor>
  class KeepAlive {
   public:
    KeepAlive() = default;

    ~KeepAlive() {
      reset();
    }

    KeepAlive(KeepAlive&& other) noexcept
        : executorAndDummyFlag_(exchange(other.executorAndDummyFlag_, 0)) {}

    template <
        typename OtherExecutor,
        typename = typename std::enable_if<
            std::is_convertible<OtherExecutor*, ExecutorT*>::value>::type>
    /* implicit */ KeepAlive(KeepAlive<OtherExecutor>&& other) noexcept
        : KeepAlive(other.get(), other.executorAndDummyFlag_ & kDummyFlag) {
      other.executorAndDummyFlag_ = 0;
    }

    KeepAlive& operator=(KeepAlive&& other) {
      reset();
      executorAndDummyFlag_ = exchange(other.executorAndDummyFlag_, 0);
      return *this;
    }

    template <
        typename OtherExecutor,
        typename = typename std::enable_if<
            std::is_convertible<OtherExecutor*, ExecutorT*>::value>::type>
    KeepAlive& operator=(KeepAlive<OtherExecutor>&& other) {
      return *this = KeepAlive(std::move(other));
    }

    void reset() {
      if (Executor* executor = get()) {
        if (exchange(executorAndDummyFlag_, 0) & kDummyFlag) {
          return;
        }
        executor->keepAliveRelease();
      }
    }

    explicit operator bool() const {
      return executorAndDummyFlag_;
    }

    ExecutorT* get() const {
      return reinterpret_cast<ExecutorT*>(
          executorAndDummyFlag_ & kExecutorMask);
    }

    ExecutorT& operator*() const {
      return *get();
    }

    ExecutorT* operator->() const {
      return get();
    }

    KeepAlive copy() const {
      return getKeepAliveToken(get());
    }

   private:
    static constexpr intptr_t kDummyFlag = 1;
    static constexpr intptr_t kExecutorMask = ~kDummyFlag;

    friend class Executor;
    template <typename OtherExecutor>
    friend class KeepAlive;

    KeepAlive(ExecutorT* executor, bool dummy)
        : executorAndDummyFlag_(
              reinterpret_cast<intptr_t>(executor) | (dummy ? kDummyFlag : 0)) {
      assert(executor);
      assert(
          (reinterpret_cast<intptr_t>(executor) & kExecutorMask) ==
          reinterpret_cast<intptr_t>(executor));
    }

    intptr_t executorAndDummyFlag_{reinterpret_cast<intptr_t>(nullptr)};
  };

  template <typename ExecutorT>
  static KeepAlive<ExecutorT> getKeepAliveToken(ExecutorT* executor) {
    static_assert(
        std::is_base_of<Executor, ExecutorT>::value,
        "getKeepAliveToken only works for folly::Executor implementations.");
    if (!executor) {
      return {};
    }
    folly::Executor* executorPtr = executor;
    if (executorPtr->keepAliveAcquire()) {
      return makeKeepAlive<ExecutorT>(executor);
    }
    return makeKeepAliveDummy<ExecutorT>(executor);
  }

  template <typename ExecutorT>
  static KeepAlive<ExecutorT> getKeepAliveToken(ExecutorT& executor) {
    static_assert(
        std::is_base_of<Executor, ExecutorT>::value,
        "getKeepAliveToken only works for folly::Executor implementations.");
    return getKeepAliveToken(&executor);
  }

 protected:
  /**
   * Returns true if the KeepAlive is constructed from an executor that does
   * not support the keep alive ref-counting functionality
   */
  template <typename ExecutorT>
  static bool isKeepAliveDummy(const KeepAlive<ExecutorT>& keepAlive) {
    return reinterpret_cast<intptr_t>(keepAlive.executorAndDummyFlag_) &
        KeepAlive<ExecutorT>::kDummyFlag;
  }

  // Acquire a keep alive token. Should return false if keep-alive mechanism
  // is not supported.
  virtual bool keepAliveAcquire();
  // Release a keep alive token previously acquired by keepAliveAcquire().
  // Will never be called if keepAliveAcquire() returns false.
  virtual void keepAliveRelease();

  template <typename ExecutorT>
  static KeepAlive<ExecutorT> makeKeepAlive(ExecutorT* executor) {
    static_assert(
        std::is_base_of<Executor, ExecutorT>::value,
        "makeKeepAlive only works for folly::Executor implementations.");
    return KeepAlive<ExecutorT>{executor, false};
  }

 private:
  template <typename ExecutorT>
  static KeepAlive<ExecutorT> makeKeepAliveDummy(ExecutorT* executor) {
    static_assert(
        std::is_base_of<Executor, ExecutorT>::value,
        "makeKeepAliveDummy only works for folly::Executor implementations.");
    return KeepAlive<ExecutorT>{executor, true};
  }
};

} // folly
/// Returns a keep-alive token which guarantees that Executor will keep
/// processing tasks until the token is released (if supported by Executor).
/// KeepAlive always contains a valid pointer to an Executor.
template <typename ExecutorT>
Executor::KeepAlive<ExecutorT> getKeepAliveToken(ExecutorT* executor) {
  static_assert(
      std::is_base_of<Executor, ExecutorT>::value,
      "getKeepAliveToken only works for folly::Executor implementations.");
  return Executor::getKeepAliveToken(executor);
}

template <typename ExecutorT>
Executor::KeepAlive<ExecutorT> getKeepAliveToken(ExecutorT& executor) {
  static_assert(
      std::is_base_of<Executor, ExecutorT>::value,
      "getKeepAliveToken only works for folly::Executor implementations.");
  return getKeepAliveToken(&executor);
}

} // namespace folly
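For context, a minimal sketch of the keep-alive API introduced above (illustrative only, not part of this diff; InlineRunner is a made-up executor):

#include <folly/Executor.h>

// Runs work inline. It does not override keepAliveAcquire()/keepAliveRelease(),
// so getKeepAliveToken() falls back to a "dummy" token with no ref-counting.
class InlineRunner : public folly::Executor {
 public:
  void add(folly::Func f) override {
    f();
  }
};

void demo() {
  InlineRunner runner;
  auto ka = folly::getKeepAliveToken(runner);
  ka->add([] { /* enqueue work through the token */ });
} // the token is released when ka goes out of scope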
263 ios/Pods/Folly/folly/Expected.h generated
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2016-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -13,9 +13,8 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Like folly::Optional, but can store a value *or* and error.
 * Like folly::Optional, but can store a value *or* an error.
 *
 * @author Eric Niebler (eniebler@fb.com)
 */
@ -29,11 +28,17 @@
#include <type_traits>
#include <utility>

#include <folly/CPortability.h>
#include <folly/CppAttributes.h>
#include <folly/Likely.h>
#include <folly/Optional.h>
#include <folly/Portability.h>
#include <folly/Preprocessor.h>
#include <folly/Traits.h> // for construct_in_place_t
#include <folly/Traits.h>
#include <folly/Unit.h>
#include <folly/Utility.h>
#include <folly/lang/ColdClass.h>
#include <folly/lang/Exception.h>

#define FOLLY_EXPECTED_ID(X) FB_CONCATENATE(FB_CONCATENATE(Folly, X), __LINE__)

@ -92,6 +97,10 @@ using ExpectedErrorType =

// Details...
namespace expected_detail {

template <typename Value, typename Error>
struct PromiseReturn;

#ifdef _MSC_VER
// MSVC 2015 can't handle the StrictConjunction, so we have
// to use std::conjunction instead.
@ -128,27 +137,29 @@ using IsConvertible = StrictConjunction<
    std::is_assignable<To&, From>>;

template <class T, class U>
auto doEmplaceAssign(int, T& t, U&& u) -> decltype(void(t = (U &&)u)) {
  t = (U &&)u;
auto doEmplaceAssign(int, T& t, U&& u)
    -> decltype(void(t = static_cast<U&&>(u))) {
  t = static_cast<U&&>(u);
}

template <class T, class U>
auto doEmplaceAssign(long, T& t, U&& u) -> decltype(void(T((U &&)u))) {
auto doEmplaceAssign(long, T& t, U&& u)
    -> decltype(void(T(static_cast<U&&>(u)))) {
  t.~T();
  ::new ((void*)std::addressof(t)) T((U &&)u);
  ::new ((void*)std::addressof(t)) T(static_cast<U&&>(u));
}

template <class T, class... Us>
auto doEmplaceAssign(int, T& t, Us&&... us)
    -> decltype(void(t = T((Us &&)us...))) {
  t = T((Us &&)us...);
    -> decltype(void(t = T(static_cast<Us&&>(us)...))) {
  t = T(static_cast<Us&&>(us)...);
}

template <class T, class... Us>
auto doEmplaceAssign(long, T& t, Us&&... us)
    -> decltype(void(T((Us &&)us...))) {
    -> decltype(void(T(static_cast<Us&&>(us)...))) {
  t.~T();
  ::new ((void*)std::addressof(t)) T((Us &&)us...);
  ::new ((void*)std::addressof(t)) T(static_cast<Us&&>(us)...);
}

struct EmptyTag {};
@ -159,7 +170,7 @@ enum class StorageType { ePODStruct, ePODUnion, eUnion };

template <class Value, class Error>
constexpr StorageType getStorageType() {
  return StrictAllOf<IsTriviallyCopyable, Value, Error>::value
  return StrictAllOf<is_trivially_copyable, Value, Error>::value
      ? (sizeof(std::pair<Value, Error>) <= sizeof(void * [2]) &&
                 StrictAllOf<std::is_trivial, Value, Error>::value
             ? StorageType::ePODStruct
@ -233,6 +244,11 @@ struct ExpectedStorage {
  Value&& value() && {
    return std::move(value_);
  }
  // TODO (t17322426): remove when VS2015 support is deprecated
  // VS2015 static analyzer incorrectly flags these as unreachable in certain
  // circumstances. VS2017 does not have this problem on the same code.
  FOLLY_PUSH_WARNING
  FOLLY_MSVC_DISABLE_WARNING(4702) // unreachable code
  Error& error() & {
    return error_;
  }
@ -242,6 +258,7 @@ struct ExpectedStorage {
  Error&& error() && {
    return std::move(error_);
  }
  FOLLY_POP_WARNING
};

template <class Value, class Error>
@ -445,7 +462,7 @@ struct ExpectedStorage<Value, Error, StorageType::eUnion>
      this->which_ = Which::eError;
    }
  }
  bool isThis(const ExpectedStorage* that) const {
  bool isSelfAssign(const ExpectedStorage* that) const {
    return this == that;
  }
  constexpr bool isSelfAssign(const void*) const {
@ -453,8 +470,9 @@ struct ExpectedStorage<Value, Error, StorageType::eUnion>
  }
  template <class Other>
  void assign(Other&& that) {
    if (isSelfAssign(&that))
    if (isSelfAssign(&that)) {
      return;
    }
    switch (that.which_) {
      case Which::eValue:
        this->assignValue(static_cast<Other&&>(that).value());
@ -527,6 +545,11 @@ struct ExpectedStorage<Value, Error, StorageType::ePODStruct> {
  Value&& value() && {
    return std::move(value_);
  }
  // TODO (t17322426): remove when VS2015 support is deprecated
  // VS2015 static analyzer incorrectly flags these as unreachable in certain
  // circumstances. VS2017 does not have this problem on the same code.
  FOLLY_PUSH_WARNING
  FOLLY_MSVC_DISABLE_WARNING(4702) // unreachable code
  Error& error() & {
    return error_;
  }
@ -536,6 +559,7 @@ struct ExpectedStorage<Value, Error, StorageType::ePODStruct> {
  Error&& error() && {
    return std::move(error_);
  }
  FOLLY_POP_WARNING
};

namespace expected_detail_ExpectedHelper {
@ -577,13 +601,14 @@ struct ExpectedHelper {
          T::template return_<E>(
              (std::declval<Fn>()(std::declval<This>().value()), unit)),
          std::declval<Fns>()...)) {
    if (LIKELY(ex.which_ == expected_detail::Which::eValue))
    if (LIKELY(ex.which_ == expected_detail::Which::eValue)) {
      return T::then_(
          T::template return_<E>(
              // Uses the comma operator defined above IFF the lambda
              // returns non-void.
              (static_cast<Fn&&>(fn)(static_cast<This&&>(ex).value()), unit)),
          static_cast<Fns&&>(fns)...);
    }
    return makeUnexpected(static_cast<This&&>(ex).error());
  }

@ -595,9 +620,10 @@ struct ExpectedHelper {
      class Err = decltype(std::declval<No>()(std::declval<This>().error()))
          FOLLY_REQUIRES_TRAILING(!std::is_void<Err>::value)>
  static Ret thenOrThrow_(This&& ex, Yes&& yes, No&& no) {
    if (LIKELY(ex.which_ == expected_detail::Which::eValue))
    if (LIKELY(ex.which_ == expected_detail::Which::eValue)) {
      return Ret(static_cast<Yes&&>(yes)(static_cast<This&&>(ex).value()));
    throw static_cast<No&&>(no)(static_cast<This&&>(ex).error());
    }
    throw_exception(static_cast<No&&>(no)(static_cast<This&&>(ex).error()));
  }

  template <
@ -608,15 +634,16 @@ struct ExpectedHelper {
      class Err = decltype(std::declval<No>()(std::declval<This&>().error()))
          FOLLY_REQUIRES_TRAILING(std::is_void<Err>::value)>
  static Ret thenOrThrow_(This&& ex, Yes&& yes, No&& no) {
    if (LIKELY(ex.which_ == expected_detail::Which::eValue))
    if (LIKELY(ex.which_ == expected_detail::Which::eValue)) {
      return Ret(static_cast<Yes&&>(yes)(static_cast<This&&>(ex).value()));
    }
    static_cast<No&&>(no)(ex.error());
    throw typename Unexpected<ExpectedErrorType<This>>::MakeBadExpectedAccess()(
        static_cast<This&&>(ex).error());
    typename Unexpected<ExpectedErrorType<This>>::MakeBadExpectedAccess bad;
    throw_exception(bad(static_cast<This&&>(ex).error()));
  }
  FOLLY_POP_WARNING
};
}
} // namespace expected_detail_ExpectedHelper
/* using override */ using expected_detail_ExpectedHelper::ExpectedHelper;

struct UnexpectedTag {};
@ -634,17 +661,21 @@ inline expected_detail::UnexpectedTag unexpected(
/**
 * An exception type thrown by Expected on catastrophic logic errors.
 */
class BadExpectedAccess : public std::logic_error {
class FOLLY_EXPORT BadExpectedAccess : public std::logic_error {
 public:
  BadExpectedAccess() : std::logic_error("bad Expected access") {}
};

namespace expected_detail {
// empty
} // namespace expected_detail

/**
 * Unexpected - a helper type used to disambiguate the construction of
 * Expected objects in the error state.
 */
template <class Error>
class Unexpected final {
class Unexpected final : ColdClass {
  template <class E>
  friend class Unexpected;
  template <class V, class E>
@ -657,7 +688,7 @@ class Unexpected final {
   * when the user tries to access the nested value but the Expected object is
   * actually storing an error code.
   */
  class BadExpectedAccess : public folly::BadExpectedAccess {
  class FOLLY_EXPORT BadExpectedAccess : public folly::BadExpectedAccess {
   public:
    explicit BadExpectedAccess(Error err)
        : folly::BadExpectedAccess{}, error_(std::move(err)) {}
@ -874,8 +905,6 @@ class Expected final : expected_detail::ExpectedStorage<Value, Error> {
 public:
  using value_type = Value;
  using error_type = Error;
  using IsTriviallyCopyable = typename expected_detail::
      StrictAllOf<IsTriviallyCopyable, Value, Error>::type;

  template <class U>
  using rebind = Expected<U, Error>;
@ -1017,6 +1046,12 @@ class Expected final : expected_detail::ExpectedStorage<Value, Error> {
    return *this;
  }

  // Used only when an Expected is used with coroutines on MSVC
  /* implicit */ Expected(const expected_detail::PromiseReturn<Value, Error>& p)
      : Expected{} {
    p.promise_->value_ = this;
  }

  template <class... Ts FOLLY_REQUIRES_TRAILING(
      std::is_constructible<Value, Ts&&...>::value)>
  void emplace(Ts&&... ts) {
@ -1028,8 +1063,9 @@ class Expected final : expected_detail::ExpectedStorage<Value, Error> {
   */
  void swap(Expected& that) noexcept(
      expected_detail::StrictAllOf<IsNothrowSwappable, Value, Error>::value) {
    if (this->uninitializedByException() || that.uninitializedByException())
      throw BadExpectedAccess();
    if (this->uninitializedByException() || that.uninitializedByException()) {
      throw_exception<BadExpectedAccess>();
    }
    using std::swap;
    if (*this) {
      if (that) {
@ -1069,11 +1105,11 @@ class Expected final : expected_detail::ExpectedStorage<Value, Error> {
   * Accessors
   */
  constexpr bool hasValue() const noexcept {
    return expected_detail::Which::eValue == this->which_;
    return LIKELY(expected_detail::Which::eValue == this->which_);
  }

  constexpr bool hasError() const noexcept {
    return expected_detail::Which::eError == this->which_;
    return UNLIKELY(expected_detail::Which::eError == this->which_);
  }

  using Base::uninitializedByException;
@ -1167,8 +1203,9 @@ class Expected final : expected_detail::ExpectedStorage<Value, Error> {
      expected_detail::ExpectedHelper::then_(
          std::declval<const Base&>(),
          std::declval<Fns>()...)) {
    if (this->uninitializedByException())
      throw BadExpectedAccess();
    if (this->uninitializedByException()) {
      throw_exception<BadExpectedAccess>();
    }
    return expected_detail::ExpectedHelper::then_(
        base(), static_cast<Fns&&>(fns)...);
  }
@ -1177,8 +1214,9 @@ class Expected final : expected_detail::ExpectedStorage<Value, Error> {
  auto then(Fns&&... fns) & -> decltype(expected_detail::ExpectedHelper::then_(
      std::declval<Base&>(),
      std::declval<Fns>()...)) {
    if (this->uninitializedByException())
      throw BadExpectedAccess();
    if (this->uninitializedByException()) {
      throw_exception<BadExpectedAccess>();
    }
    return expected_detail::ExpectedHelper::then_(
        base(), static_cast<Fns&&>(fns)...);
  }
@ -1187,8 +1225,9 @@ class Expected final : expected_detail::ExpectedStorage<Value, Error> {
  auto then(Fns&&... fns) && -> decltype(expected_detail::ExpectedHelper::then_(
      std::declval<Base&&>(),
      std::declval<Fns>()...)) {
    if (this->uninitializedByException())
      throw BadExpectedAccess();
    if (this->uninitializedByException()) {
      throw_exception<BadExpectedAccess>();
    }
    return expected_detail::ExpectedHelper::then_(
        std::move(base()), static_cast<Fns&&>(fns)...);
  }
@ -1200,8 +1239,9 @@ class Expected final : expected_detail::ExpectedStorage<Value, Error> {
  auto thenOrThrow(Yes&& yes, No&& no = No{}) const& -> decltype(
      std::declval<Yes>()(std::declval<const Value&>())) {
    using Ret = decltype(std::declval<Yes>()(std::declval<const Value&>()));
    if (this->uninitializedByException())
      throw BadExpectedAccess();
    if (this->uninitializedByException()) {
      throw_exception<BadExpectedAccess>();
    }
    return Ret(expected_detail::ExpectedHelper::thenOrThrow_(
        base(), static_cast<Yes&&>(yes), static_cast<No&&>(no)));
  }
@ -1210,8 +1250,9 @@ class Expected final : expected_detail::ExpectedStorage<Value, Error> {
  auto thenOrThrow(Yes&& yes, No&& no = No{}) & -> decltype(
      std::declval<Yes>()(std::declval<Value&>())) {
    using Ret = decltype(std::declval<Yes>()(std::declval<Value&>()));
    if (this->uninitializedByException())
      throw BadExpectedAccess();
    if (this->uninitializedByException()) {
      throw_exception<BadExpectedAccess>();
    }
    return Ret(expected_detail::ExpectedHelper::thenOrThrow_(
        base(), static_cast<Yes&&>(yes), static_cast<No&&>(no)));
  }
@ -1220,8 +1261,9 @@ class Expected final : expected_detail::ExpectedStorage<Value, Error> {
  auto thenOrThrow(Yes&& yes, No&& no = No{}) && -> decltype(
      std::declval<Yes>()(std::declval<Value&&>())) {
    using Ret = decltype(std::declval<Yes>()(std::declval<Value&&>()));
    if (this->uninitializedByException())
      throw BadExpectedAccess();
    if (this->uninitializedByException()) {
      throw_exception<BadExpectedAccess>();
    }
    return Ret(expected_detail::ExpectedHelper::thenOrThrow_(
        std::move(base()), static_cast<Yes&&>(yes), static_cast<No&&>(no)));
  }
@ -1229,15 +1271,17 @@ class Expected final : expected_detail::ExpectedStorage<Value, Error> {
 private:
  void requireValue() const {
    if (UNLIKELY(!hasValue())) {
      if (LIKELY(hasError()))
        throw typename Unexpected<Error>::BadExpectedAccess(this->error_);
      throw BadExpectedAccess();
      if (LIKELY(hasError())) {
        using Err = typename Unexpected<Error>::BadExpectedAccess;
        throw_exception<Err>(this->error_);
      }
      throw_exception<BadExpectedAccess>();
    }
  }

  void requireError() const {
    if (UNLIKELY(!hasError())) {
      throw BadExpectedAccess();
      throw_exception<BadExpectedAccess>();
    }
  }

@ -1251,13 +1295,15 @@ inline typename std::enable_if<IsEqualityComparable<Value>::value, bool>::type
operator==(
    const Expected<Value, Error>& lhs,
    const Expected<Value, Error>& rhs) {
  if (UNLIKELY(lhs.which_ != rhs.which_))
    return UNLIKELY(lhs.uninitializedByException()) ? false
                                                    : throw BadExpectedAccess();
  if (UNLIKELY(lhs.uninitializedByException()))
    throw BadExpectedAccess();
  if (UNLIKELY(lhs.hasError()))
  if (UNLIKELY(lhs.uninitializedByException())) {
    throw_exception<BadExpectedAccess>();
  }
  if (UNLIKELY(lhs.which_ != rhs.which_)) {
    return false;
  }
  if (UNLIKELY(lhs.hasError())) {
    return true; // All error states are considered equal
  }
  return lhs.value_ == rhs.value_;
}

@ -1276,12 +1322,15 @@ operator<(
    const Expected<Value, Error>& lhs,
    const Expected<Value, Error>& rhs) {
  if (UNLIKELY(
          lhs.uninitializedByException() || rhs.uninitializedByException()))
    throw BadExpectedAccess();
  if (UNLIKELY(lhs.hasError()))
          lhs.uninitializedByException() || rhs.uninitializedByException())) {
    throw_exception<BadExpectedAccess>();
  }
  if (UNLIKELY(lhs.hasError())) {
    return !rhs.hasError();
  if (UNLIKELY(rhs.hasError()))
  }
  if (UNLIKELY(rhs.hasError())) {
    return false;
  }
  return lhs.value_ < rhs.value_;
}

@ -1315,8 +1364,8 @@ inline bool operator>=(
/**
 * swap Expected values
 */
template <class Error, class Value>
void swap(Expected<Error, Value>& lhs, Expected<Value, Error>& rhs) noexcept(
template <class Value, class Error>
void swap(Expected<Value, Error>& lhs, Expected<Value, Error>& rhs) noexcept(
    expected_detail::StrictAllOf<IsNothrowSwappable, Value, Error>::value) {
  lhs.swap(rhs);
}
@ -1382,3 +1431,99 @@ bool operator>(const Value& other, const Expected<Value, Error>&) = delete;

#undef FOLLY_REQUIRES
#undef FOLLY_REQUIRES_TRAILING

// Enable the use of folly::Expected with `co_await`
// Inspired by https://github.com/toby-allsopp/coroutine_monad
#if FOLLY_HAS_COROUTINES
#include <experimental/coroutine>

namespace folly {
namespace expected_detail {
template <typename Value, typename Error>
struct Promise;

template <typename Value, typename Error>
struct PromiseReturn {
  Optional<Expected<Value, Error>> storage_;
  Promise<Value, Error>* promise_;
  /* implicit */ PromiseReturn(Promise<Value, Error>& promise) noexcept
      : promise_(&promise) {
    promise_->value_ = &storage_;
  }
  PromiseReturn(PromiseReturn&& that) noexcept
      : PromiseReturn{*that.promise_} {}
  ~PromiseReturn() {}
  /* implicit */ operator Expected<Value, Error>() & {
    return std::move(*storage_);
  }
};

template <typename Value, typename Error>
struct Promise {
  Optional<Expected<Value, Error>>* value_ = nullptr;
  Promise() = default;
  Promise(Promise const&) = delete;
  // This should work regardless of whether the compiler generates:
  //    folly::Expected<Value, Error> retobj{ p.get_return_object(); } // MSVC
  // or:
  //    auto retobj = p.get_return_object(); // clang
  PromiseReturn<Value, Error> get_return_object() noexcept {
    return *this;
  }
  std::experimental::suspend_never initial_suspend() const noexcept {
    return {};
  }
  std::experimental::suspend_never final_suspend() const {
    return {};
  }
  template <typename U>
  void return_value(U&& u) {
    value_->emplace(static_cast<U&&>(u));
  }
  void unhandled_exception() {
    // Technically, throwing from unhandled_exception is underspecified:
    // https://github.com/GorNishanov/CoroutineWording/issues/17
    throw;
  }
};

template <typename Value, typename Error>
struct Awaitable {
  Expected<Value, Error> o_;

  explicit Awaitable(Expected<Value, Error> o) : o_(std::move(o)) {}

  bool await_ready() const noexcept {
    return o_.hasValue();
  }
  Value await_resume() {
    return std::move(o_.value());
  }

  // Explicitly only allow suspension into a Promise
  template <typename U>
  void await_suspend(std::experimental::coroutine_handle<Promise<U, Error>> h) {
    *h.promise().value_ = makeUnexpected(std::move(o_.error()));
    // Abort the rest of the coroutine. resume() is not going to be called
    h.destroy();
  }
};
} // namespace expected_detail

template <typename Value, typename Error>
expected_detail::Awaitable<Value, Error>
/* implicit */ operator co_await(Expected<Value, Error> o) {
  return expected_detail::Awaitable<Value, Error>{std::move(o)};
}
} // namespace folly

// This makes folly::Expected<Value> useable as a coroutine return type...
namespace std {
namespace experimental {
template <typename Value, typename Error, typename... Args>
struct coroutine_traits<folly::Expected<Value, Error>, Args...> {
  using promise_type = folly::expected_detail::Promise<Value, Error>;
};
} // namespace experimental
} // namespace std
#endif // FOLLY_HAS_COROUTINES
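The coroutine plumbing above lets an Expected-returning function short-circuit on error via co_await; a minimal sketch, assuming a coroutine-capable toolchain where FOLLY_HAS_COROUTINES is set (the Err enum is made up):

#include <folly/Expected.h>

enum class Err { odd };

folly::Expected<int, Err> half(int n) {
  if (n % 2 != 0) {
    return folly::makeUnexpected(Err::odd);
  }
  return n / 2;
}

folly::Expected<int, Err> quarter(int n) {
  // If half() holds an error, co_await destroys the coroutine and the
  // Unexpected propagates out as quarter()'s return value.
  int h = co_await half(n);
  co_return co_await half(h);
}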
1195 ios/Pods/Folly/folly/FBString.h generated
File diff suppressed because it is too large
774 ios/Pods/Folly/folly/FBVector.h generated
File diff suppressed because it is too large
39 ios/Pods/Folly/folly/File.h generated
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2013-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -21,7 +21,10 @@
#include <sys/types.h>

#include <string>
#include <system_error>

#include <folly/ExceptionWrapper.h>
#include <folly/Expected.h>
#include <folly/Portability.h>
#include <folly/Range.h>
#include <folly/portability/Unistd.h>
@ -36,22 +39,39 @@ class File {
  /**
   * Creates an empty File object, for late initialization.
   */
  File();
  File() noexcept;

  /**
   * Create a File object from an existing file descriptor.
   * Takes ownership of the file descriptor if ownsFd is true.
   */
  explicit File(int fd, bool ownsFd = false);
  explicit File(int fd, bool ownsFd = false) noexcept;

  /**
   * Open and create a file object. Throws on error.
   * Owns the file descriptor implicitly.
   */
  explicit File(const char* name, int flags = O_RDONLY, mode_t mode = 0666);
  explicit File(
      const std::string& name, int flags = O_RDONLY, mode_t mode = 0666);
      const std::string& name,
      int flags = O_RDONLY,
      mode_t mode = 0666);
  explicit File(StringPiece name, int flags = O_RDONLY, mode_t mode = 0666);

  /**
   * All the constructors that are not noexcept can throw std::system_error.
   * This is a helper method to use folly::Expected to chain a file open event
   * to something else you want to do with the open fd.
   */
  template <typename... Args>
  static Expected<File, exception_wrapper> makeFile(Args&&... args) noexcept {
    try {
      return File(std::forward<Args>(args)...);
    } catch (const std::system_error& se) {
      return makeUnexpected(exception_wrapper(std::current_exception(), se));
    }
  }

  ~File();

  /**
@ -62,7 +82,9 @@ class File {
  /**
   * Return the file descriptor, or -1 if the file was closed.
   */
  int fd() const { return fd_; }
  int fd() const {
    return fd_;
  }

  /**
   * Returns 'true' iff the file was successfully opened.
@ -97,7 +119,7 @@ class File {
  /**
   * Swap this File with another.
   */
  void swap(File& other);
  void swap(File& other) noexcept;

  // movable
  File(File&&) noexcept;
@ -131,7 +153,6 @@ class File {
  bool ownsFd_;
};

void swap(File& a, File& b);
void swap(File& a, File& b) noexcept;


} // namespace folly
} // namespace folly
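A short usage sketch for the makeFile() helper added above (illustrative only, not part of this diff; the path is made up):

#include <fcntl.h>
#include <folly/File.h>

void demo() {
  // Expected<File, exception_wrapper> instead of a throwing constructor.
  auto f = folly::File::makeFile("/tmp/example.txt", O_RDONLY);
  if (f.hasValue()) {
    int fd = f.value().fd(); // borrow the descriptor; File still owns it
    (void)fd;
  } else {
    auto msg = f.error().what(); // exception_wrapper describing the failure
    (void)msg;
  }
}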
101 ios/Pods/Folly/folly/FileUtil.h generated
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2013-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -16,18 +16,19 @@

#pragma once

#include <folly/Conv.h>
#include <sys/stat.h>
#include <sys/types.h>

#include <cassert>
#include <limits>

#include <folly/Portability.h>
#include <folly/Range.h>
#include <folly/ScopeGuard.h>
#include <folly/portability/Fcntl.h>
#include <folly/portability/SysUio.h>
#include <folly/portability/Unistd.h>

#include <cassert>
#include <limits>
#include <sys/stat.h>
#include <sys/types.h>

namespace folly {

/**
@ -77,10 +78,10 @@ ssize_t writevNoInt(int fd, const iovec* iov, int count);
 * readv and preadv. The contents of iov after these functions return
 * is unspecified.
 */
ssize_t readFull(int fd, void* buf, size_t n);
ssize_t preadFull(int fd, void* buf, size_t n, off_t offset);
ssize_t readvFull(int fd, iovec* iov, int count);
ssize_t preadvFull(int fd, iovec* iov, int count, off_t offset);
FOLLY_NODISCARD ssize_t readFull(int fd, void* buf, size_t n);
FOLLY_NODISCARD ssize_t preadFull(int fd, void* buf, size_t n, off_t offset);
FOLLY_NODISCARD ssize_t readvFull(int fd, iovec* iov, int count);
FOLLY_NODISCARD ssize_t preadvFull(int fd, iovec* iov, int count, off_t offset);

/**
 * Similar to readFull and preadFull above, wrappers around write() and
@ -95,6 +96,9 @@ ssize_t preadvFull(int fd, iovec* iov, int count, off_t offset);
 * Note that writevFull and pwritevFull require iov to be non-const, unlike
 * writev and pwritev. The contents of iov after these functions return
 * is unspecified.
 *
 * These functions return -1 on error, or the total number of bytes written
 * (which is always the same as the number of requested bytes) on success.
 */
ssize_t writeFull(int fd, const void* buf, size_t n);
ssize_t pwriteFull(int fd, const void* buf, size_t n, off_t offset);
@ -116,8 +120,9 @@ bool readFile(
    int fd,
    Container& out,
    size_t num_bytes = std::numeric_limits<size_t>::max()) {
  static_assert(sizeof(out[0]) == 1,
      "readFile: only containers with byte-sized elements accepted");
  static_assert(
      sizeof(out[0]) == 1,
      "readFile: only containers with byte-sized elements accepted");

  size_t soFar = 0; // amount of bytes successfully read
  SCOPE_EXIT {
@ -127,17 +132,17 @@ bool readFile(

  // Obtain file size:
  struct stat buf;
  if (fstat(fd, &buf) == -1) return false;
  if (fstat(fd, &buf) == -1) {
    return false;
  }
  // Some files (notably under /proc and /sys on Linux) lie about
  // their size, so treat the size advertised by fstat under advise
  // but don't rely on it. In particular, if the size is zero, we
  // should attempt to read stuff. If not zero, we'll attempt to read
  // one extra byte.
  constexpr size_t initialAlloc = 1024 * 4;
  out.resize(
      std::min(
          buf.st_size > 0 ? folly::to<size_t>(buf.st_size + 1) : initialAlloc,
          num_bytes));
  out.resize(std::min(
      buf.st_size > 0 ? (size_t(buf.st_size) + 1) : initialAlloc, num_bytes));

  while (soFar < out.size()) {
    const auto actual = readFull(fd, &out[soFar], out.size() - soFar);
@ -167,7 +172,7 @@ bool readFile(
    size_t num_bytes = std::numeric_limits<size_t>::max()) {
  DCHECK(file_name);

  const auto fd = openNoInt(file_name, O_RDONLY);
  const auto fd = openNoInt(file_name, O_RDONLY | O_CLOEXEC);
  if (fd == -1) {
    return false;
  }
@ -191,19 +196,63 @@ bool readFile(
 *
 * Returns: true on success or false on failure. In the latter case
 * errno will be set appropriately by the failing system primitive.
 *
 * Note that this function may leave the file in a partially written state on
 * failure. Use writeFileAtomic() if you want to ensure that the existing file
 * state will be unchanged on error.
 */
template <class Container>
bool writeFile(const Container& data, const char* filename,
    int flags = O_WRONLY | O_CREAT | O_TRUNC) {
  static_assert(sizeof(data[0]) == 1,
      "writeFile works with element size equal to 1");
  int fd = open(filename, flags, 0666);
bool writeFile(
    const Container& data,
    const char* filename,
    int flags = O_WRONLY | O_CREAT | O_TRUNC,
    mode_t mode = 0666) {
  static_assert(
      sizeof(data[0]) == 1, "writeFile works with element size equal to 1");
  int fd = open(filename, flags, mode);
  if (fd == -1) {
    return false;
  }
  bool ok = data.empty() ||
    writeFull(fd, &data[0], data.size()) == static_cast<ssize_t>(data.size());
      writeFull(fd, &data[0], data.size()) == static_cast<ssize_t>(data.size());
  return closeNoInt(fd) == 0 && ok;
}

} // namespaces
/**
 * Write file contents "atomically".
 *
 * This writes the data to a temporary file in the destination directory, and
 * then renames it to the specified path. This guarantees that the specified
 * file will be replaced with the specified contents on success, or will not be
 * modified on failure.
 *
 * Note that on platforms that do not provide atomic filesystem rename
 * functionality (e.g., Windows) this behavior may not be truly atomic.
 */
void writeFileAtomic(
    StringPiece filename,
    iovec* iov,
    int count,
    mode_t permissions = 0644);
void writeFileAtomic(
    StringPiece filename,
    ByteRange data,
    mode_t permissions = 0644);
void writeFileAtomic(
    StringPiece filename,
    StringPiece data,
    mode_t permissions = 0644);

/**
 * A version of writeFileAtomic() that returns an errno value instead of
 * throwing on error.
 *
 * Returns 0 on success or an errno value on error.
 */
int writeFileAtomicNoThrow(
    StringPiece filename,
    iovec* iov,
    int count,
    mode_t permissions = 0644);

} // namespace folly
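A brief sketch of the helpers above (illustrative only, not part of this diff; paths made up):

#include <folly/FileUtil.h>
#include <string>

bool demo() {
  std::string payload = "hello\n";
  // writeFile() may leave a partial file behind on failure...
  bool ok = folly::writeFile(payload, "/tmp/plain.txt");
  // ...while writeFileAtomic() stages a temp file and renames it into place.
  folly::writeFileAtomic("/tmp/atomic.txt", folly::StringPiece(payload));
  std::string back;
  return ok && folly::readFile("/tmp/plain.txt", back);
}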
64 ios/Pods/Folly/folly/Fingerprint.h generated
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2012-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -28,8 +28,7 @@
 * extended for fingerprints larger than 64 bits, and modified to use
 * 64-bit instead of 32-bit integers for computation.
 *
 * The precomputed tables are in FingerprintTable.cpp, which is automatically
 * generated by ComputeFingerprintTable.cpp.
 * The precomputed tables are in Fingerprint.cpp.
 *
 * Benchmarked on 10/13/2009 on a 2.5GHz quad-core Xeon L5420,
 * - Fingerprint<64>::update64() takes about 12ns
@ -44,6 +43,7 @@

#pragma once

#include <array>
#include <cstdint>

#include <folly/Range.h>
@ -52,28 +52,42 @@ namespace folly {

namespace detail {

constexpr size_t poly_size(size_t bits) {
  return 1 + (bits - 1) / 64;
}

template <size_t Deg>
using poly_table =
    std::array<std::array<std::array<uint64_t, poly_size(Deg)>, 256>, 8>;

template <int BITS>
struct FingerprintTable {
  static const uint64_t poly[1 + (BITS - 1) / 64];
  static const uint64_t table[8][256][1 + (BITS - 1) / 64];
  static const uint64_t poly[poly_size(BITS)];
  static const poly_table<BITS> table;
};

template <int BITS>
const uint64_t FingerprintTable<BITS>::poly[1 + (BITS - 1) / 64] = {};
const uint64_t FingerprintTable<BITS>::poly[poly_size(BITS)] = {};
template <int BITS>
const uint64_t FingerprintTable<BITS>::table[8][256][1 + (BITS - 1) / 64] = {};
const poly_table<BITS> FingerprintTable<BITS>::table = {};

#define FOLLY_DECLARE_FINGERPRINT_TABLES(BITS) \
  template <> \
  const uint64_t FingerprintTable<BITS>::poly[1 + (BITS - 1) / 64]; \
  template <> \
  const uint64_t FingerprintTable<BITS>::table[8][256][1 + (BITS - 1) / 64]
#ifndef _MSC_VER
// MSVC 2015 can't handle these extern specialization declarations,
// but they aren't needed for things to work right, so we just don't
// declare them in the header for MSVC.

#define FOLLY_DECLARE_FINGERPRINT_TABLES(BITS) \
  template <> \
  const uint64_t FingerprintTable<BITS>::poly[poly_size(BITS)]; \
  template <> \
  const poly_table<BITS> FingerprintTable<BITS>::table

FOLLY_DECLARE_FINGERPRINT_TABLES(64);
FOLLY_DECLARE_FINGERPRINT_TABLES(96);
FOLLY_DECLARE_FINGERPRINT_TABLES(128);

#undef FOLLY_DECLARE_FINGERPRINT_TABLES
#endif

} // namespace detail

@ -93,8 +107,9 @@ class Fingerprint {
  Fingerprint() {
    // Use a non-zero starting value. We'll use (1 << (BITS-1))
    fp_[0] = 1ULL << 63;
    for (int i = 1; i < size(); i++)
    for (int i = 1; i < size(); i++) {
      fp_[i] = 0;
    }
  }

  Fingerprint& update8(uint8_t v) {
@ -109,7 +124,7 @@ class Fingerprint {
  Fingerprint& update32(uint32_t v) {
    uint32_t out = shlor32(v);
    for (int i = 0; i < 4; i++) {
      xortab(detail::FingerprintTable<BITS>::table[i][out&0xff]);
      xortab(detail::FingerprintTable<BITS>::table[i][out & 0xff]);
      out >>= 8;
    }
    return *this;
@ -118,7 +133,7 @@ class Fingerprint {
  Fingerprint& update64(uint64_t v) {
    uint64_t out = shlor64(v);
    for (int i = 0; i < 8; i++) {
      xortab(detail::FingerprintTable<BITS>::table[i][out&0xff]);
      xortab(detail::FingerprintTable<BITS>::table[i][out & 0xff]);
      out >>= 8;
    }
    return *this;
@ -135,8 +150,8 @@ class Fingerprint {
  /**
   * Return the number of uint64s needed to hold the fingerprint value.
   */
  static int size() {
    return 1 + (BITS-1)/64;
  constexpr static int size() {
    return detail::poly_size(BITS);
  }

  /**
@ -155,7 +170,7 @@ class Fingerprint {

 private:
  // XOR the fingerprint with a value from one of the tables.
  void xortab(const uint64_t* tab) {
  void xortab(std::array<uint64_t, detail::poly_size(BITS)> const& tab) {
    for (int i = 0; i < size(); i++) {
      fp_[i] ^= tab[i];
    }
@ -164,11 +179,11 @@ class Fingerprint {
  // Helper functions: shift the fingerprint value left by 8/32/64 bits,
  // return the "out" value (the bits that were shifted out), and add "v"
  // in the bits on the right.
  uint8_t shlor8(uint8_t v);
  uint8_t shlor8(uint8_t v);
  uint32_t shlor32(uint32_t v);
  uint64_t shlor64(uint64_t v);

  uint64_t fp_[1 + (BITS-1)/64];
  uint64_t fp_[detail::poly_size(BITS)];
};

// Convenience functions
@ -187,8 +202,7 @@ inline uint64_t fingerprint64(StringPiece str) {
 * Return the 64 most significant bits in *msb, and the 32 least significant
 * bits in *lsb.
 */
inline void fingerprint96(StringPiece str,
    uint64_t* msb, uint32_t* lsb) {
inline void fingerprint96(StringPiece str, uint64_t* msb, uint32_t* lsb) {
  uint64_t fp[2];
  Fingerprint<96>().update(str).write(fp);
  *msb = fp[0];
@ -200,15 +214,13 @@ inline void fingerprint96(StringPiece str,
 * Return the 64 most significant bits in *msb, and the 64 least significant
 * bits in *lsb.
 */
inline void fingerprint128(StringPiece str,
    uint64_t* msb, uint64_t* lsb) {
inline void fingerprint128(StringPiece str, uint64_t* msb, uint64_t* lsb) {
  uint64_t fp[2];
  Fingerprint<128>().update(str).write(fp);
  *msb = fp[0];
  *lsb = fp[1];
}


template <>
inline uint8_t Fingerprint<64>::shlor8(uint8_t v) {
  uint8_t out = (uint8_t)(fp_[0] >> 56);
@ -278,4 +290,4 @@ inline uint64_t Fingerprint<128>::shlor64(uint64_t v) {
  return out;
}

} // namespace folly
} // namespace folly
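For orientation, the convenience entry points above in use (illustrative only, not part of this diff):

#include <cstdint>
#include <folly/Fingerprint.h>

void demo() {
  // 64-bit Rabin fingerprint of a byte string.
  uint64_t fp = folly::fingerprint64(folly::StringPiece("hello"));

  // Wider variants return the result split across two output words.
  uint64_t msb = 0;
  uint32_t lsb = 0;
  folly::fingerprint96(folly::StringPiece("hello"), &msb, &lsb);
  (void)fp;
}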
3106 ios/Pods/Folly/folly/FixedString.h generated (Normal file)
File diff suppressed because it is too large
235 ios/Pods/Folly/folly/Foreach.h generated
@ -1,235 +0,0 @@
/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

/*
 * Interim macros (until we have C++0x range-based for) that simplify
 * writing loops of the form
 *
 * for (Container<data>::iterator i = c.begin(); i != c.end(); ++i) statement
 *
 * Just replace the above with:
 *
 * FOR_EACH (i, c) statement
 *
 * and everything is taken care of.
 *
 * The implementation is a bit convoluted to make sure the container is
 * evaluated only once (however, keep in mind that c.end() is evaluated
 * at every pass through the loop). To ensure the container is not
 * evaluated multiple times, the macro defines one do-nothing if
 * statement to inject the Boolean variable FOR_EACH_state1, and then a
 * for statement that is executed only once, which defines the variable
 * FOR_EACH_state2 holding an rvalue reference to the container being
 * iterated. The workhorse is the last loop, which uses the just-defined
 * rvalue reference FOR_EACH_state2.
 *
 * The state variables are nested so they don't interfere; you can use
 * FOR_EACH multiple times in the same scope, either at the same level or
 * nested.
 *
 * In optimized builds g++ eliminates the extra gymnastics entirely and
 * generates code 100% identical to the handwritten loop.
 */

#include <type_traits>
#include <folly/Preprocessor.h>

/*
 * Form a local variable name from "FOR_EACH_" x __LINE__, so that
 * FOR_EACH can be nested without creating shadowed declarations.
 */
#define _FE_ANON(x) FB_CONCATENATE(FOR_EACH_, FB_CONCATENATE(x, __LINE__))

/*
 * Shorthand for:
 *   for (auto i = c.begin(); i != c.end(); ++i)
 * except that c is evaluated only once.
 */
#define FOR_EACH(i, c) \
  if (bool _FE_ANON(s1_) = false) {} else \
    for (auto && _FE_ANON(s2_) = (c); \
        !_FE_ANON(s1_); _FE_ANON(s1_) = true) \
      for (auto i = _FE_ANON(s2_).begin(); \
          i != _FE_ANON(s2_).end(); ++i)

/*
 * Similar to FOR_EACH, but iterates the container backwards by
 * using rbegin() and rend().
 */
#define FOR_EACH_R(i, c) \
  if (bool FOR_EACH_R_state1 = false) {} else \
    for (auto && FOR_EACH_R_state2 = (c); \
        !FOR_EACH_R_state1; FOR_EACH_R_state1 = true) \
      for (auto i = FOR_EACH_R_state2.rbegin(); \
          i != FOR_EACH_R_state2.rend(); ++i)

/*
 * Similar to FOR_EACH but also allows client to specify a 'count' variable
 * to track the current iteration in the loop (starting at zero).
 * Similar to python's enumerate() function. For example:
 * string commaSeparatedValues = "VALUES: ";
 * FOR_EACH_ENUMERATE(ii, value, columns) { // don't want comma at the end!
 *   commaSeparatedValues += (ii == 0) ? *value : string(",") + *value;
 * }
 */
#define FOR_EACH_ENUMERATE(count, i, c) \
  if (bool FOR_EACH_state1 = false) {} else \
    for (auto && FOR_EACH_state2 = (c); \
        !FOR_EACH_state1; FOR_EACH_state1 = true) \
      if (size_t FOR_EACH_privateCount = 0) {} else \
        if (const size_t& count = FOR_EACH_privateCount) {} else \
          for (auto i = FOR_EACH_state2.begin(); \
              i != FOR_EACH_state2.end(); ++FOR_EACH_privateCount, ++i)

/**
 * Similar to FOR_EACH, but gives the user the key and value for each entry in
 * the container, instead of just the iterator to the entry. For example:
 *   map<string, string> testMap;
 *   FOR_EACH_KV(key, value, testMap) {
 *     cout << key << " " << value;
 *   }
 */
#define FOR_EACH_KV(k, v, c) \
  if (unsigned int FOR_EACH_state1 = 0) {} else \
    for (auto && FOR_EACH_state2 = (c); \
        !FOR_EACH_state1; FOR_EACH_state1 = 1) \
      for (auto FOR_EACH_state3 = FOR_EACH_state2.begin(); \
          FOR_EACH_state3 != FOR_EACH_state2.end(); \
          FOR_EACH_state1 == 2 \
              ? ((FOR_EACH_state1 = 0), ++FOR_EACH_state3) \
              : (FOR_EACH_state3 = FOR_EACH_state2.end())) \
        for (auto &k = FOR_EACH_state3->first; \
            !FOR_EACH_state1; ++FOR_EACH_state1) \
          for (auto &v = FOR_EACH_state3->second; \
              !FOR_EACH_state1; ++FOR_EACH_state1)

namespace folly { namespace detail {

// Boost 1.48 lacks has_less, we emulate a subset of it here.
template <typename T, typename U>
class HasLess {
  struct BiggerThanChar { char unused[2]; };
  template <typename C, typename D> static char test(decltype(C() < D())*);
  template <typename, typename> static BiggerThanChar test(...);
 public:
  enum { value = sizeof(test<T, U>(0)) == 1 };
};

/**
 * notThereYet helps the FOR_EACH_RANGE macro by opportunistically
 * using "<" instead of "!=" whenever available when checking for loop
 * termination. This makes e.g. examples such as FOR_EACH_RANGE (i,
 * 10, 5) execute zero iterations instead of looping virtually
 * forever. At the same time, some iterator types define "!=" but not
 * "<". The notThereYet function will dispatch differently for those.
 *
 * Below is the correct implementation of notThereYet. It is disabled
 * because of a bug in Boost 1.46: The filesystem::path::iterator
 * defines operator< (via boost::iterator_facade), but that in turn
 * uses distance_to which is undefined for that particular
 * iterator. So HasLess (defined above) identifies
 * boost::filesystem::path as properly comparable with <, but in fact
 * attempting to do so will yield a compile-time error.
 *
 * The else branch (active) contains a conservative
 * implementation.
 */

#if 0

template <class T, class U>
typename std::enable_if<HasLess<T, U>::value, bool>::type
notThereYet(T& iter, const U& end) {
  return iter < end;
}

template <class T, class U>
typename std::enable_if<!HasLess<T, U>::value, bool>::type
notThereYet(T& iter, const U& end) {
  return iter != end;
}

#else

template <class T, class U>
typename std::enable_if<
    (std::is_arithmetic<T>::value && std::is_arithmetic<U>::value) ||
        (std::is_pointer<T>::value && std::is_pointer<U>::value),
    bool>::type
notThereYet(T& iter, const U& end) {
  return iter < end;
}

template <class T, class U>
typename std::enable_if<
    !(
        (std::is_arithmetic<T>::value && std::is_arithmetic<U>::value) ||
        (std::is_pointer<T>::value && std::is_pointer<U>::value)
    ),
    bool>::type
notThereYet(T& iter, const U& end) {
  return iter != end;
}

#endif


/**
 * downTo is similar to notThereYet, but in reverse - it helps the
 * FOR_EACH_RANGE_R macro.
 */
template <class T, class U>
typename std::enable_if<HasLess<U, T>::value, bool>::type
downTo(T& iter, const U& begin) {
  return begin < iter--;
}

template <class T, class U>
typename std::enable_if<!HasLess<U, T>::value, bool>::type
downTo(T& iter, const U& begin) {
  if (iter == begin) return false;
  --iter;
  return true;
}

} }

/*
 * Iteration with given limits. end is assumed to be reachable from
 * begin. end is evaluated every pass through the loop.
 *
 * NOTE: The type of the loop variable should be the common type of "begin"
 * and "end". e.g. If "begin" is "int" but "end" is "long", we want "i"
 * to be "long". This is done by getting the type of (true ? begin : end)
 */
#define FOR_EACH_RANGE(i, begin, end) \
  for (auto i = (true ? (begin) : (end)); \
      ::folly::detail::notThereYet(i, (end)); \
      ++i)

/*
 * Iteration with given limits. begin is assumed to be reachable from
 * end by successive decrements. begin is evaluated every pass through
 * the loop.
 *
 * NOTE: The type of the loop variable should be the common type of "begin"
 * and "end". e.g. If "begin" is "int" but "end" is "long", we want "i"
 * to be "long". This is done by getting the type of (false ? begin : end)
 */
#define FOR_EACH_RANGE_R(i, begin, end) \
  for (auto i = (false ? (begin) : (end)); ::folly::detail::downTo(i, (begin));)
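Foreach.h is deleted outright here; per the file's own comment the macros existed only until range-based for arrived, so callers migrate to plain loops, e.g. (a minimal sketch, not part of this diff):

#include <iostream>
#include <map>
#include <string>

void demo(const std::map<std::string, std::string>& m) {
  // FOR_EACH_KV(key, value, m) { ... } becomes:
  for (const auto& kv : m) {
    std::cout << kv.first << " " << kv.second << "\n";
  }
  // FOR_EACH_RANGE (i, 0, 10) { ... } becomes:
  for (int i = 0; i < 10; ++i) {
    std::cout << i << "\n";
  }
}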
698 ios/Pods/Folly/folly/Format-inl.h generated
File diff suppressed because it is too large
427 ios/Pods/Folly/folly/Format.cpp generated (Normal file)
@ -0,0 +1,427 @@
/*
 * Copyright 2012-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <folly/Format.h>

#include <folly/ConstexprMath.h>
#include <folly/CppAttributes.h>
#include <folly/container/Array.h>

#include <double-conversion/double-conversion.h>

namespace folly {
namespace detail {

// ctor for items in the align table
struct format_table_align_make_item {
  static constexpr std::size_t size = 256;
  constexpr FormatArg::Align operator()(std::size_t index) const {
    // clang-format off
    return
        index == '<' ? FormatArg::Align::LEFT :
        index == '>' ? FormatArg::Align::RIGHT :
        index == '=' ? FormatArg::Align::PAD_AFTER_SIGN :
        index == '^' ? FormatArg::Align::CENTER :
        FormatArg::Align::INVALID;
    // clang-format on
  }
};

// ctor for items in the conv tables for representing parts of nonnegative
// integers into ascii digits of length Size, over a given base Base
template <std::size_t Base, std::size_t Size, bool Upper = false>
struct format_table_conv_make_item {
  static_assert(Base <= 36, "Base is unrepresentable");
  struct make_item {
    std::size_t index{};
    constexpr explicit make_item(std::size_t index_) : index(index_) {} // gcc49
    constexpr char alpha(std::size_t ord) const {
      return ord < 10 ? '0' + ord : (Upper ? 'A' : 'a') + (ord - 10);
    }
    constexpr char operator()(std::size_t offset) const {
      return alpha(index / constexpr_pow(Base, Size - offset - 1) % Base);
    }
  };
  constexpr std::array<char, Size> operator()(std::size_t index) const {
    return make_array_with<Size>(make_item{index});
  }
};

// ctor for items in the sign table
struct format_table_sign_make_item {
  static constexpr std::size_t size = 256;
  constexpr FormatArg::Sign operator()(std::size_t index) const {
    // clang-format off
    return
        index == '+' ? FormatArg::Sign::PLUS_OR_MINUS :
        index == '-' ? FormatArg::Sign::MINUS :
        index == ' ' ? FormatArg::Sign::SPACE_OR_MINUS :
        FormatArg::Sign::INVALID;
    // clang-format on
  }
};

// the tables
FOLLY_STORAGE_CONSTEXPR auto formatAlignTable =
    make_array_with<256>(format_table_align_make_item{});
FOLLY_STORAGE_CONSTEXPR auto formatSignTable =
    make_array_with<256>(format_table_sign_make_item{});
FOLLY_STORAGE_CONSTEXPR decltype(formatHexLower) formatHexLower =
    make_array_with<256>(format_table_conv_make_item<16, 2, false>{});
FOLLY_STORAGE_CONSTEXPR decltype(formatHexUpper) formatHexUpper =
    make_array_with<256>(format_table_conv_make_item<16, 2, true>{});
FOLLY_STORAGE_CONSTEXPR decltype(formatOctal) formatOctal =
    make_array_with<512>(format_table_conv_make_item<8, 3>{});
FOLLY_STORAGE_CONSTEXPR decltype(formatBinary) formatBinary =
    make_array_with<256>(format_table_conv_make_item<2, 8>{});

} // namespace detail
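To make the table construction concrete: each entry is the complete fixed-width digit string for its index, so formatting a byte is a single lookup rather than a divide loop. A standalone sketch of the equivalent idea (illustrative only; the folly version above builds the table at compile time):

#include <array>
#include <cstdio>

// 256-entry table mapping each byte to its two lowercase hex digits,
// mirroring format_table_conv_make_item<16, 2, false>.
static std::array<std::array<char, 2>, 256> makeHexTable() {
  std::array<std::array<char, 2>, 256> t{};
  const char* digits = "0123456789abcdef";
  for (int i = 0; i < 256; ++i) {
    t[i][0] = digits[i >> 4]; // high nibble
    t[i][1] = digits[i & 15]; // low nibble
  }
  return t;
}

int main() {
  auto hex = makeHexTable();
  unsigned char b = 0xaf;
  std::printf("%c%c\n", hex[b][0], hex[b][1]); // prints "af"
  return 0;
}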
using namespace folly::detail;

void FormatValue<double>::formatHelper(
    fbstring& piece,
    int& prefixLen,
    FormatArg& arg) const {
  using ::double_conversion::DoubleToStringConverter;
  using ::double_conversion::StringBuilder;

  arg.validate(FormatArg::Type::FLOAT);

  if (arg.presentation == FormatArg::kDefaultPresentation) {
    arg.presentation = 'g';
  }

  const char* infinitySymbol = isupper(arg.presentation) ? "INF" : "inf";
  const char* nanSymbol = isupper(arg.presentation) ? "NAN" : "nan";
  char exponentSymbol = isupper(arg.presentation) ? 'E' : 'e';

  if (arg.precision == FormatArg::kDefaultPrecision) {
    arg.precision = 6;
  }

  // 2+: for null terminator and optional sign shenanigans.
  constexpr int bufLen = 2 +
      constexpr_max(2 + DoubleToStringConverter::kMaxFixedDigitsBeforePoint +
                        DoubleToStringConverter::kMaxFixedDigitsAfterPoint,
                    constexpr_max(
                        8 + DoubleToStringConverter::kMaxExponentialDigits,
                        7 + DoubleToStringConverter::kMaxPrecisionDigits));
  char buf[bufLen];
  StringBuilder builder(buf + 1, bufLen - 1);

  char plusSign;
  switch (arg.sign) {
    case FormatArg::Sign::PLUS_OR_MINUS:
      plusSign = '+';
      break;
    case FormatArg::Sign::SPACE_OR_MINUS:
      plusSign = ' ';
      break;
    default:
      plusSign = '\0';
      break;
  };

  auto flags = DoubleToStringConverter::EMIT_POSITIVE_EXPONENT_SIGN |
      (arg.trailingDot ? DoubleToStringConverter::EMIT_TRAILING_DECIMAL_POINT
                       : 0);

  double val = val_;
  switch (arg.presentation) {
    case '%':
      val *= 100;
      FOLLY_FALLTHROUGH;
    case 'f':
    case 'F': {
      if (arg.precision > DoubleToStringConverter::kMaxFixedDigitsAfterPoint) {
        arg.precision = DoubleToStringConverter::kMaxFixedDigitsAfterPoint;
      }
      DoubleToStringConverter conv(
          flags,
          infinitySymbol,
          nanSymbol,
          exponentSymbol,
          -4,
          arg.precision,
          0,
          0);
      arg.enforce(
          conv.ToFixed(val, arg.precision, &builder),
          "fixed double conversion failed");
      break;
    }
    case 'e':
    case 'E': {
      if (arg.precision > DoubleToStringConverter::kMaxExponentialDigits) {
        arg.precision = DoubleToStringConverter::kMaxExponentialDigits;
      }

      DoubleToStringConverter conv(
          flags,
          infinitySymbol,
          nanSymbol,
          exponentSymbol,
          -4,
          arg.precision,
          0,
          0);
      arg.enforce(conv.ToExponential(val, arg.precision, &builder));
      break;
    }
    case 'n': // should be locale-aware, but isn't
    case 'g':
    case 'G': {
      if (arg.precision < DoubleToStringConverter::kMinPrecisionDigits) {
        arg.precision = DoubleToStringConverter::kMinPrecisionDigits;
      } else if (arg.precision > DoubleToStringConverter::kMaxPrecisionDigits) {
        arg.precision = DoubleToStringConverter::kMaxPrecisionDigits;
      }
      DoubleToStringConverter conv(
          flags,
          infinitySymbol,
          nanSymbol,
          exponentSymbol,
          -4,
          arg.precision,
          0,
          0);
      arg.enforce(conv.ToShortest(val, &builder));
      break;
    }
    default:
      arg.error("invalid specifier '", arg.presentation, "'");
  }

  int len = builder.position();
  builder.Finalize();
  DCHECK_GT(len, 0);

  // Add '+' or ' ' sign if needed
  char* p = buf + 1;
  // anything that's neither negative nor nan
  prefixLen = 0;
  if (plusSign && (*p != '-' && *p != 'n' && *p != 'N')) {
    *--p = plusSign;
    ++len;
    prefixLen = 1;
  } else if (*p == '-') {
    prefixLen = 1;
  }

  piece = fbstring(p, size_t(len));
}
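In caller terms, the presentation characters handled above map directly onto format strings. A few illustrative calls (a sketch, assuming <folly/Format.h> from this Pod; expected outputs follow from the conversion logic above):

#include <folly/Format.h>
#include <iostream>

int main() {
  std::cout << folly::sformat("{:.2f}", 3.14159) << "\n";  // fixed: 3.14
  std::cout << folly::sformat("{:+.2f}", 3.14159) << "\n"; // forced sign: +3.14
  std::cout << folly::sformat("{:.2e}", 31415.9) << "\n";  // exponential: 3.14e+4
  std::cout << folly::sformat("{}", 3.14159) << "\n";      // default 'g': 3.14159
  return 0;
}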
void FormatArg::initSlow() {
  auto b = fullArgString.begin();
  auto end = fullArgString.end();

  // Parse key
  auto p = static_cast<const char*>(memchr(b, ':', size_t(end - b)));
  if (!p) {
    key_ = StringPiece(b, end);
    return;
  }
  key_ = StringPiece(b, p);

  if (*p == ':') {
    // parse format spec
    if (++p == end) {
      return;
    }

    // fill/align, or just align
    Align a;
    if (p + 1 != end &&
        (a = formatAlignTable[static_cast<unsigned char>(p[1])]) !=
            Align::INVALID) {
      fill = *p;
      align = a;
      p += 2;
      if (p == end) {
        return;
      }
    } else if (
        (a = formatAlignTable[static_cast<unsigned char>(*p)]) !=
        Align::INVALID) {
      align = a;
      if (++p == end) {
        return;
      }
    }

    Sign s;
    unsigned char uSign = static_cast<unsigned char>(*p);
    if ((s = formatSignTable[uSign]) != Sign::INVALID) {
      sign = s;
      if (++p == end) {
        return;
      }
    }

    if (*p == '#') {
      basePrefix = true;
      if (++p == end) {
        return;
      }
    }

    if (*p == '0') {
      enforce(align == Align::DEFAULT, "alignment specified twice");
      fill = '0';
      align = Align::PAD_AFTER_SIGN;
      if (++p == end) {
        return;
      }
    }

    auto readInt = [&] {
      auto const c = p;
      do {
        ++p;
      } while (p != end && *p >= '0' && *p <= '9');
      return to<int>(StringPiece(c, p));
    };

    if (*p == '*') {
      width = kDynamicWidth;
      ++p;

      if (p == end) {
        return;
      }

      if (*p >= '0' && *p <= '9') {
        widthIndex = readInt();
      }

      if (p == end) {
        return;
      }
    } else if (*p >= '0' && *p <= '9') {
      width = readInt();

      if (p == end) {
        return;
      }
    }

    if (*p == ',') {
      thousandsSeparator = true;
      if (++p == end) {
        return;
      }
    }

    if (*p == '.') {
      auto d = ++p;
      while (p != end && *p >= '0' && *p <= '9') {
        ++p;
      }
      if (p != d) {
        precision = to<int>(StringPiece(d, p));
        if (p != end && *p == '.') {
          trailingDot = true;
          ++p;
        }
      } else {
        trailingDot = true;
      }

      if (p == end) {
        return;
      }
    }

    presentation = *p;
    if (++p == end) {
      return;
    }
  }

  error("extra characters in format string");
}
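Reading the parser back, a spec such as "{:*>+8.3f}" decomposes into fill '*', align '>', sign '+', width 8, precision 3, presentation 'f'. A sketch of specs this grammar accepts (assuming <folly/Format.h>):

#include <folly/Format.h>
#include <iostream>

int main() {
  // fill/align + width: right-align within 8 columns, '*' as the fill char
  std::cout << folly::sformat("{:*>8}", 42) << "\n";     // ******42
  // leading '0': shorthand for fill '0' with PAD_AFTER_SIGN alignment
  std::cout << folly::sformat("{:08}", -42) << "\n";     // -0000042
  // ',' thousands separator (integers only, per validate() below)
  std::cout << folly::sformat("{:,d}", 1234567) << "\n"; // 1,234,567
  // explicit argument index before the ':'
  std::cout << folly::sformat("{1} then {0}", "a", "b") << "\n"; // b then a
  return 0;
}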
void FormatArg::validate(Type type) const {
  enforce(keyEmpty(), "index not allowed");
  switch (type) {
    case Type::INTEGER:
      enforce(
          precision == kDefaultPrecision, "precision not allowed on integers");
      break;
    case Type::FLOAT:
      enforce(
          !basePrefix, "base prefix ('#') specifier only allowed on integers");
      enforce(
          !thousandsSeparator,
          "thousands separator (',') only allowed on integers");
      break;
    case Type::OTHER:
      enforce(
          align != Align::PAD_AFTER_SIGN,
          "'='alignment only allowed on numbers");
      enforce(sign == Sign::DEFAULT, "sign specifier only allowed on numbers");
      enforce(
          !basePrefix, "base prefix ('#') specifier only allowed on integers");
      enforce(
          !thousandsSeparator,
          "thousands separator (',') only allowed on integers");
      break;
  }
}

namespace detail {
void insertThousandsGroupingUnsafe(char* start_buffer, char** end_buffer) {
  uint32_t remaining_digits = uint32_t(*end_buffer - start_buffer);
  uint32_t separator_size = (remaining_digits - 1) / 3;
  uint32_t result_size = remaining_digits + separator_size;
  *end_buffer = *end_buffer + separator_size;

  // get the end of the new string with the separators
  uint32_t buffer_write_index = result_size - 1;
  uint32_t buffer_read_index = remaining_digits - 1;
  start_buffer[buffer_write_index + 1] = 0;

  bool done = false;
  uint32_t next_group_size = 3;

  while (!done) {
    uint32_t current_group_size = std::max<uint32_t>(
        1, std::min<uint32_t>(remaining_digits, next_group_size));

    // write out the current group's digits to the buffer index
    for (uint32_t i = 0; i < current_group_size; i++) {
      start_buffer[buffer_write_index--] = start_buffer[buffer_read_index--];
    }

    // if not finished, write the separator before the next group
    if (buffer_write_index < buffer_write_index + 1) {
      start_buffer[buffer_write_index--] = ',';
    } else {
      done = true;
    }

    remaining_digits -= current_group_size;
  }
}
} // namespace detail
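The odd-looking test above terminates the loop through unsigned wraparound: buffer_write_index < buffer_write_index + 1 is false only once the write cursor has decremented past zero and wrapped to UINT32_MAX. A standalone sketch of the same in-place grouping, written with explicit pointer cursors for clarity (illustrative reimplementation, not the folly routine):

#include <cstdio>
#include <cstring>

// In-place grouping: "1234567" -> "1,234,567".
// The buffer must have room for the inserted separators.
void groupThousands(char* buf, char** end) {
  unsigned digits = unsigned(*end - buf);
  unsigned seps = (digits - 1) / 3;
  char* w = buf + digits + seps; // write cursor (one past the result)
  char* r = buf + digits;        // read cursor (one past the digits)
  *end = w;
  unsigned inGroup = 0;
  while (r != buf) {
    *--w = *--r;
    if (++inGroup == 3 && r != buf) { // a separator before each full group
      *--w = ',';
      inGroup = 0;
    }
  }
}

int main() {
  char buf[16] = "1234567";
  char* end = buf + std::strlen(buf);
  groupThousands(buf, &end);
  std::printf("%.*s\n", int(end - buf), buf); // 1,234,567
  return 0;
}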
FormatKeyNotFoundException::FormatKeyNotFoundException(StringPiece key)
    : std::out_of_range(kMessagePrefix.str() + key.str()) {}

constexpr StringPiece const FormatKeyNotFoundException::kMessagePrefix;

} // namespace folly
208 ios/Pods/Folly/folly/Format.h (generated)
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2012-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -18,45 +18,55 @@
#define FOLLY_FORMAT_H_

#include <cstdio>
#include <stdexcept>
#include <tuple>
#include <type_traits>

#include <folly/CPortability.h>
#include <folly/Conv.h>
#include <folly/Range.h>
#include <folly/Traits.h>
#include <folly/String.h>
#include <folly/FormatArg.h>
#include <folly/Range.h>
#include <folly/String.h>
#include <folly/Traits.h>

// Ignore shadowing warnings within this file, so includers can use -Wshadow.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
FOLLY_PUSH_WARNING
FOLLY_GNU_DISABLE_WARNING("-Wshadow")

namespace folly {

// forward declarations
template <bool containerMode, class... Args> class Formatter;
template <bool containerMode, class... Args>
class Formatter;
template <class... Args>
Formatter<false, Args...> format(StringPiece fmt, Args&&... args);
template <class C>
Formatter<true, C> vformat(StringPiece fmt, C&& container);
template <class T, class Enable=void> class FormatValue;
template <class T, class Enable = void>
class FormatValue;

// meta-attribute to identify formatters in this sea of template weirdness
namespace detail {
class FormatterTag {};
};
} // namespace detail

/**
 * Formatter class.
 *
 * Note that this class is tricky, as it keeps *references* to its arguments
 * (and doesn't copy the passed-in format string). Thankfully, you can't use
 * this directly, you have to use format(...) below.
 * Note that this class is tricky, as it keeps *references* to its lvalue
 * arguments (while it takes ownership of the temporaries), and it doesn't
 * copy the passed-in format string. Thankfully, you can't use this
 * directly, you have to use format(...) below.
 */

/* BaseFormatter class. Currently, the only behavior that can be
 * overridden is the actual formatting of positional parameters in
/* BaseFormatter class.
 * Overridable behaviours:
 * You may override the actual formatting of positional parameters in
 * `doFormatArg`. The Formatter class provides the default implementation.
 *
 * You may also override `doFormat` and `getSizeArg`. These override points were
 * added to permit static analysis of format strings, when it is inconvenient
 * or impossible to instantiate a BaseFormatter with the correct storage
 */
template <class Derived, bool containerMode, class... Args>
class BaseFormatter {
@ -71,9 +81,9 @@ class BaseFormatter {
   * Append to a string.
   */
  template <class Str>
  typename std::enable_if<IsSomeString<Str>::value>::type
  appendTo(Str& str) const {
    auto appender = [&str] (StringPiece s) { str.append(s.data(), s.size()); };
  typename std::enable_if<IsSomeString<Str>::value>::type appendTo(
      Str& str) const {
    auto appender = [&str](StringPiece s) { str.append(s.data(), s.size()); };
    (*this)(appender);
  }

@ -96,16 +106,19 @@ class BaseFormatter {
  }

  /**
   * metadata to identify generated children of BaseFormatter
   * Metadata to identify generated children of BaseFormatter
   */
  typedef detail::FormatterTag IsFormatter;
  typedef BaseFormatter BaseType;

 private:
  typedef std::tuple<FormatValue<
      typename std::decay<Args>::type>...> ValueTuple;
  typedef std::tuple<Args...> ValueTuple;
  static constexpr size_t valueCount = std::tuple_size<ValueTuple>::value;

  Derived const& asDerived() const {
    return *static_cast<const Derived*>(this);
  }

  template <size_t K, class Callback>
  typename std::enable_if<K == valueCount>::type
  doFormatFrom(size_t i, FormatArg& arg, Callback& /*cb*/) const {
@ -116,9 +129,9 @@ class BaseFormatter {
  typename std::enable_if<(K < valueCount)>::type
  doFormatFrom(size_t i, FormatArg& arg, Callback& cb) const {
    if (i == K) {
      static_cast<const Derived*>(this)->template doFormatArg<K>(arg, cb);
      asDerived().template doFormatArg<K>(arg, cb);
    } else {
      doFormatFrom<K+1>(i, arg, cb);
      doFormatFrom<K + 1>(i, arg, cb);
    }
  }

@ -128,32 +141,36 @@ class BaseFormatter {
  }

  template <size_t K>
  typename std::enable_if<K == valueCount, int>::type
  getSizeArgFrom(size_t i, const FormatArg& arg) const {
  typename std::enable_if<K == valueCount, int>::type getSizeArgFrom(
      size_t i,
      const FormatArg& arg) const {
    arg.error("argument index out of range, max=", i);
  }

  template <class T>
  typename std::enable_if<std::is_integral<T>::value &&
                          !std::is_same<T, bool>::value, int>::type
  typename std::enable_if<
      std::is_integral<T>::value && !std::is_same<T, bool>::value,
      int>::type
  getValue(const FormatValue<T>& format, const FormatArg&) const {
    return static_cast<int>(format.getValue());
  }

  template <class T>
  typename std::enable_if<!std::is_integral<T>::value ||
                          std::is_same<T, bool>::value, int>::type
  typename std::enable_if<
      !std::is_integral<T>::value || std::is_same<T, bool>::value,
      int>::type
  getValue(const FormatValue<T>&, const FormatArg& arg) const {
    arg.error("dynamic field width argument must be integral");
  }

  template <size_t K>
  typename std::enable_if<K < valueCount, int>::type
  getSizeArgFrom(size_t i, const FormatArg& arg) const {
  typename std::enable_if <
      K<valueCount, int>::type getSizeArgFrom(size_t i, const FormatArg& arg)
      const {
    if (i == K) {
      return getValue(std::get<K>(values_), arg);
      return getValue(getFormatValue<K>(), arg);
    }
    return getSizeArgFrom<K+1>(i, arg);
    return getSizeArgFrom<K + 1>(i, arg);
  }

  int getSizeArg(size_t i, const FormatArg& arg) const {
@ -173,31 +190,47 @@ class BaseFormatter {
  // for the exclusive use of format() (below). This way, you can't create
  // a Formatter object, but can handle references to it (for streaming,
  // conversion to string, etc) -- which is good, as Formatter objects are
  // dangerous (they hold references, possibly to temporaries)
  // dangerous (they may hold references).
  BaseFormatter(BaseFormatter&&) = default;
  BaseFormatter& operator=(BaseFormatter&&) = default;

  template <size_t K>
  using ArgType = typename std::tuple_element<K, ValueTuple>::type;

  template <size_t K>
  FormatValue<typename std::decay<ArgType<K>>::type> getFormatValue() const {
    return FormatValue<typename std::decay<ArgType<K>>::type>(
        std::get<K>(values_));
  }

  ValueTuple values_;
};

template <bool containerMode, class... Args>
class Formatter : public BaseFormatter<Formatter<containerMode, Args...>,
                                       containerMode,
                                       Args...> {
class Formatter : public BaseFormatter<
                      Formatter<containerMode, Args...>,
                      containerMode,
                      Args...> {
 private:
  explicit Formatter(StringPiece& str, Args&&... args)
      : BaseFormatter<Formatter<containerMode, Args...>,
                      containerMode,
                      Args...>(str, std::forward<Args>(args)...) {}
      : BaseFormatter<
            Formatter<containerMode, Args...>,
            containerMode,
            Args...>(str, std::forward<Args>(args)...) {
    static_assert(
        !containerMode || sizeof...(Args) == 1,
        "Exactly one argument required in container mode");
  }

  template <size_t K, class Callback>
  void doFormatArg(FormatArg& arg, Callback& cb) const {
    std::get<K>(this->values_).format(arg, cb);
    this->template getFormatValue<K>().format(arg, cb);
  }

  friend class BaseFormatter<Formatter<containerMode, Args...>,
                             containerMode,
                             Args...>;
  friend class BaseFormatter<
      Formatter<containerMode, Args...>,
      containerMode,
      Args...>;

  template <class... A>
  friend Formatter<false, A...> format(StringPiece fmt, A&&... arg);
@ -208,10 +241,13 @@ class Formatter : public BaseFormatter<Formatter<containerMode, Args...>,
/**
 * Formatter objects can be written to streams.
 */
template<bool containerMode, class... Args>
std::ostream& operator<<(std::ostream& out,
                         const Formatter<containerMode, Args...>& formatter) {
  auto writer = [&out] (StringPiece sp) { out.write(sp.data(), sp.size()); };
template <bool containerMode, class... Args>
std::ostream& operator<<(
    std::ostream& out,
    const Formatter<containerMode, Args...>& formatter) {
  auto writer = [&out](StringPiece sp) {
    out.write(sp.data(), std::streamsize(sp.size()));
  };
  formatter(writer);
  return out;
}
@ -220,8 +256,9 @@ std::ostream& operator<<(std::ostream& out,
 * Formatter objects can be written to stdio FILEs.
 */
template <class Derived, bool containerMode, class... Args>
void writeTo(FILE* fp,
             const BaseFormatter<Derived, containerMode, Args...>& formatter);
void writeTo(
    FILE* fp,
    const BaseFormatter<Derived, containerMode, Args...>& formatter);

/**
 * Create a formatter object.
@ -232,8 +269,7 @@ void writeTo(FILE* fp,
 */
template <class... Args>
Formatter<false, Args...> format(StringPiece fmt, Args&&... args) {
  return Formatter<false, Args...>(
      fmt, std::forward<Args>(args)...);
  return Formatter<false, Args...>(fmt, std::forward<Args>(args)...);
}
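Since a Formatter keeps references to its lvalue arguments, typical callers materialize the result immediately via sformat() or str(). A sketch (assuming <folly/Format.h>):

#include <folly/Format.h>
#include <iostream>
#include <string>

int main() {
  // Format and materialize immediately; never store the Formatter itself,
  // since it keeps references to its lvalue arguments.
  std::string name = "world";
  std::string s = folly::sformat("Hello, {}! {} + {} = {}", name, 2, 3, 2 + 3);
  std::cout << s << "\n"; // Hello, world! 2 + 3 = 5

  // Streaming a freshly built Formatter is also fine: it dies at the end of
  // the full expression, before any reference can dangle.
  std::cout << folly::format("{:>8}", name) << "\n"; // "   world"
  return 0;
}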
/**
@ -260,8 +296,7 @@ inline std::string sformat(StringPiece fmt, Args&&... args) {
 */
template <class Container>
Formatter<true, Container> vformat(StringPiece fmt, Container&& container) {
  return Formatter<true, Container>(
      fmt, std::forward<Container>(container));
  return Formatter<true, Container>(fmt, std::forward<Container>(container));
}

/**
@ -273,6 +308,29 @@ inline std::string svformat(StringPiece fmt, Container&& container) {
  return vformat(fmt, std::forward<Container>(container)).str();
}
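vformat() and svformat() draw their arguments from a single container rather than a parameter pack. A sketch (assuming <folly/Format.h>):

#include <folly/Format.h>
#include <iostream>
#include <map>
#include <string>
#include <vector>

int main() {
  // Positional lookup into a sequence:
  std::vector<int> v{10, 20, 30};
  std::cout << folly::svformat("{0} < {2}", v) << "\n"; // 10 < 30

  // Keyed lookup into a string-keyed map:
  std::map<std::string, std::string> m{{"who", "world"}};
  std::cout << folly::svformat("Hello, {who}!", m) << "\n"; // Hello, world!
  return 0;
}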
/**
 * Exception class thrown when a format key is not found in the given
 * associative container keyed by strings. We inherit std::out_of_range for
 * compatibility with callers that expect exception to be thrown directly
 * by std::map or std::unordered_map.
 *
 * Having the key be at the end of the message string, we can access it by
 * simply adding its offset to what(). Not storing separate std::string key
 * makes the exception type small and noexcept-copyable like std::out_of_range,
 * and therefore able to fit in-situ in exception_wrapper.
 */
class FOLLY_EXPORT FormatKeyNotFoundException : public std::out_of_range {
 public:
  explicit FormatKeyNotFoundException(StringPiece key);

  char const* key() const noexcept {
    return what() + kMessagePrefix.size();
  }

 private:
  static constexpr StringPiece const kMessagePrefix = "format key not found: ";
};

/**
 * Wrap a sequence or associative container so that out-of-range lookups
 * return a default value rather than throwing an exception.
@ -281,20 +339,20 @@ inline std::string svformat(StringPiece fmt, Container&& container) {
 * format("{[no_such_key]}", defaulted(map, 42)) -> 42
 */
namespace detail {
template <class Container, class Value> struct DefaultValueWrapper {
template <class Container, class Value>
struct DefaultValueWrapper {
  DefaultValueWrapper(const Container& container, const Value& defaultValue)
      : container(container),
        defaultValue(defaultValue) {
  }
      : container(container), defaultValue(defaultValue) {}

  const Container& container;
  const Value& defaultValue;
};
} // namespace
} // namespace detail

template <class Container, class Value>
detail::DefaultValueWrapper<Container, Value>
defaulted(const Container& c, const Value& v) {
detail::DefaultValueWrapper<Container, Value> defaulted(
    const Container& c,
    const Value& v) {
  return detail::DefaultValueWrapper<Container, Value>(c, v);
}
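A sketch of defaulted() in use, on the hedged assumption that the FormatValue specialization for DefaultValueWrapper (defined in Format-inl.h) handles keyed lookup as documented above:

#include <folly/Format.h>
#include <iostream>
#include <map>
#include <string>

int main() {
  std::map<std::string, int> m{{"answer", 42}};
  // Without defaulted(), a missing key throws FormatKeyNotFoundException
  // (a std::out_of_range). With it, the fallback value is formatted instead.
  std::cout << folly::svformat("{answer}", folly::defaulted(m, 0)) << "\n";  // 42
  std::cout << folly::svformat("{missing}", folly::defaulted(m, 0)) << "\n"; // 0
  return 0;
}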
@ -343,25 +401,28 @@ void formatString(StringPiece val, FormatArg& arg, FormatCallback& cb);
 * field width")
 */
template <class FormatCallback>
void formatNumber(StringPiece val, int prefixLen, FormatArg& arg,
                  FormatCallback& cb);

void formatNumber(
    StringPiece val,
    int prefixLen,
    FormatArg& arg,
    FormatCallback& cb);

/**
 * Format a Formatter object recursively. Behaves just like
 * formatString(fmt.str(), arg, cb); but avoids creating a temporary
 * string if possible.
 */
template <class FormatCallback,
          class Derived,
          bool containerMode,
          class... Args>
template <
    class FormatCallback,
    class Derived,
    bool containerMode,
    class... Args>
void formatFormatter(
    const BaseFormatter<Derived, containerMode, Args...>& formatter,
    FormatArg& arg,
    FormatCallback& cb);

} // namespace format_value
} // namespace format_value

/*
 * Specialize folly::FormatValue for your type.
@ -396,7 +457,7 @@ struct IsFormatter<
    typename std::enable_if<
        std::is_same<typename T::IsFormatter, detail::FormatterTag>::value>::
        type> : public std::true_type {};
} // folly::detail
} // namespace detail

// Deprecated API. formatChecked() et. al. now behave identically to their
// non-Checked counterparts.
@ -409,8 +470,9 @@ inline std::string sformatChecked(StringPiece fmt, Args&&... args) {
  return formatChecked(fmt, std::forward<Args>(args)...).str();
}
template <class Container>
Formatter<true, Container> vformatChecked(StringPiece fmt,
                                          Container&& container) {
Formatter<true, Container> vformatChecked(
    StringPiece fmt,
    Container&& container) {
  return vformat(fmt, std::forward<Container>(container));
}
template <class Container>
@ -428,8 +490,8 @@ vformatChecked(Str* out, StringPiece fmt, Container&& container) {
  vformatChecked(fmt, std::forward<Container>(container)).appendTo(*out);
}

} // namespace folly
} // namespace folly

#include <folly/Format-inl.h>

#pragma GCC diagnostic pop
FOLLY_POP_WARNING
69 ios/Pods/Folly/folly/FormatArg.h (generated)
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2012-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -17,17 +17,18 @@
#pragma once

#include <stdexcept>

#include <folly/CPortability.h>
#include <folly/Conv.h>
#include <folly/Likely.h>
#include <folly/Portability.h>
#include <folly/Range.h>
#include <folly/lang/Exception.h>

namespace folly {

class BadFormatArg : public std::invalid_argument {
 public:
  explicit BadFormatArg(const std::string& msg)
      : std::invalid_argument(msg) {}
class FOLLY_EXPORT BadFormatArg : public std::invalid_argument {
  using invalid_argument::invalid_argument;
};

/**
@ -39,18 +40,18 @@ struct FormatArg {
   * passed-in string -- does not copy the given characters.
   */
  explicit FormatArg(StringPiece sp)
    : fullArgString(sp),
      fill(kDefaultFill),
      align(Align::DEFAULT),
      sign(Sign::DEFAULT),
      basePrefix(false),
      thousandsSeparator(false),
      trailingDot(false),
      width(kDefaultWidth),
      widthIndex(kNoIndex),
      precision(kDefaultPrecision),
      presentation(kDefaultPresentation),
      nextKeyMode_(NextKeyMode::NONE) {
      : fullArgString(sp),
        fill(kDefaultFill),
        align(Align::DEFAULT),
        sign(Sign::DEFAULT),
        basePrefix(false),
        thousandsSeparator(false),
        trailingDot(false),
        width(kDefaultWidth),
        widthIndex(kNoIndex),
        precision(kDefaultPrecision),
        presentation(kDefaultPresentation),
        nextKeyMode_(NextKeyMode::NONE) {
    if (!sp.empty()) {
      initSlow();
    }
@ -59,7 +60,7 @@ struct FormatArg {
  enum class Type {
    INTEGER,
    FLOAT,
    OTHER
    OTHER,
  };
  /**
   * Validate the argument for the given type; throws on error.
@ -103,7 +104,7 @@ struct FormatArg {
    RIGHT,
    PAD_AFTER_SIGN,
    CENTER,
    INVALID
    INVALID,
  };
  Align align;

@ -115,7 +116,7 @@ struct FormatArg {
    PLUS_OR_MINUS,
    MINUS,
    SPACE_OR_MINUS,
    INVALID
    INVALID,
  };
  Sign sign;

@ -159,7 +160,7 @@ struct FormatArg {
   * Split a key component from "key", which must be non-empty (an exception
   * is thrown otherwise).
   */
  template <bool emptyOk=false>
  template <bool emptyOk = false>
  StringPiece splitKey();

  /**
@ -206,13 +207,15 @@ struct FormatArg {
template <typename... Args>
inline std::string FormatArg::errorStr(Args&&... args) const {
  return to<std::string>(
      "invalid format argument {", fullArgString, "}: ",
      std::forward<Args>(args)...);
      "invalid format argument {",
      fullArgString,
      "}: ",
      std::forward<Args>(args)...);
}

template <typename... Args>
[[noreturn]] inline void FormatArg::error(Args&&... args) const {
  throw BadFormatArg(errorStr(std::forward<Args>(args)...));
  throw_exception<BadFormatArg>(errorStr(std::forward<Args>(args)...));
}
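Callers observe all of these parse and validation failures as BadFormatArg. A sketch (assuming <folly/Format.h>):

#include <folly/Format.h>
#include <iostream>

int main() {
  try {
    // '#' (base prefix) is only allowed on integers, so this throws.
    folly::sformat("{:#f}", 1.5);
  } catch (const folly::BadFormatArg& e) {
    std::cerr << "bad format: " << e.what() << "\n";
  }
  return 0;
}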
template <bool emptyOk>
@ -225,14 +228,14 @@ template <bool emptyOk>
inline StringPiece FormatArg::doSplitKey() {
  if (nextKeyMode_ == NextKeyMode::STRING) {
    nextKeyMode_ = NextKeyMode::NONE;
    if (!emptyOk) { // static
    if (!emptyOk) { // static
      enforce(!nextKey_.empty(), "non-empty key required");
    }
    return nextKey_;
  }

  if (key_.empty()) {
    if (!emptyOk) { // static
    if (!emptyOk) { // static
      error("non-empty key required");
    }
    return StringPiece();
@ -243,10 +246,10 @@ inline StringPiece FormatArg::doSplitKey() {
  const char* p;
  if (e[-1] == ']') {
    --e;
    p = static_cast<const char*>(memchr(b, '[', e - b));
    enforce(p, "unmatched ']'");
    p = static_cast<const char*>(memchr(b, '[', size_t(e - b)));
    enforce(p != nullptr, "unmatched ']'");
  } else {
    p = static_cast<const char*>(memchr(b, '.', e - b));
    p = static_cast<const char*>(memchr(b, '.', size_t(e - b)));
  }
  if (p) {
    key_.assign(p + 1, e);
@ -254,7 +257,7 @@ inline StringPiece FormatArg::doSplitKey() {
    p = e;
    key_.clear();
  }
  if (!emptyOk) { // static
  if (!emptyOk) { // static
    enforce(b != p, "non-empty key required");
  }
  return StringPiece(b, p);
@ -267,10 +270,10 @@ inline int FormatArg::splitIntKey() {
  }
  try {
    return to<int>(doSplitKey<true>());
  } catch (const std::out_of_range& e) {
  } catch (const std::out_of_range&) {
    error("integer key required");
    return 0; // unreached
    return 0; // unreached
  }
}

} // namespace folly
} // namespace folly
12 ios/Pods/Folly/folly/FormatTraits.h (generated)
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2015-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -16,9 +16,11 @@

#pragma once

#include <cstddef>
#include <type_traits>

namespace folly { namespace detail {
namespace folly {
namespace detail {

// Shortcut, so we don't have to use enable_if everywhere
struct FormatTraitsBase {
@ -28,7 +30,8 @@ struct FormatTraitsBase {
// Traits that define enabled, value_type, and at() for anything
// indexable with integral keys: pointers, arrays, vectors, and maps
// with integral keys
template <class T, class Enable = void> struct IndexableTraits;
template <class T, class Enable = void>
struct IndexableTraits;

// Base class for sequences (vectors, deques)
template <class C>
@ -60,4 +63,5 @@ struct IndexableTraitsAssoc : public FormatTraitsBase {
  }
};

}} // namespaces
} // namespace detail
} // namespace folly
431 ios/Pods/Folly/folly/Function.h (generated)
@ -1,7 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 *
 * @author Eric Niebler (eniebler@fb.com), Sven Over (over@fb.com)
 * Copyright 2016-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -14,7 +12,9 @@
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
/*
 * @author Eric Niebler (eniebler@fb.com), Sven Over (over@fb.com)
 * Acknowledgements: Giuseppe Ottaviano (ott@fb.com)
 */

@ -225,6 +225,9 @@

#include <folly/CppAttributes.h>
#include <folly/Portability.h>
#include <folly/Traits.h>
#include <folly/functional/Invoke.h>
#include <folly/lang/Exception.h>

namespace folly {

@ -235,25 +238,39 @@ template <typename ReturnType, typename... Args>
Function<ReturnType(Args...) const> constCastFunction(
    Function<ReturnType(Args...)>&&) noexcept;

#if FOLLY_HAVE_NOEXCEPT_FUNCTION_TYPE
template <typename ReturnType, typename... Args>
Function<ReturnType(Args...) const noexcept> constCastFunction(
    Function<ReturnType(Args...) noexcept>&&) noexcept;
#endif

namespace detail {
namespace function {

enum class Op { MOVE, NUKE, FULL, HEAP };
enum class Op { MOVE, NUKE, HEAP };

union Data {
  Data() {}
  void* big;
  std::aligned_storage<6 * sizeof(void*)>::type tiny;
};

template <typename Fun, typename FunT = typename std::decay<Fun>::type>
using IsSmall = std::integral_constant<
    bool,
    (sizeof(FunT) <= sizeof(Data::tiny) &&
     // Same as is_nothrow_move_constructible, but w/ no template instantiation.
     noexcept(FunT(std::declval<FunT&&>())))>;
template <typename Fun, typename = Fun*>
using IsSmall = Conjunction<
    bool_constant<(sizeof(Fun) <= sizeof(Data::tiny))>,
    std::is_nothrow_move_constructible<Fun>>;
using SmallTag = std::true_type;
using HeapTag = std::false_type;

template <typename T>
struct NotFunction : std::true_type {};
template <typename T>
struct NotFunction<Function<T>> : std::false_type {};

template <typename T>
using EnableIfNotFunction =
    typename std::enable_if<NotFunction<T>::value>::type;

struct CoerceTag {};

template <typename T>
@ -265,9 +282,15 @@ std::false_type isNullPtrFn(T&&) {
  return {};
}

inline bool uninitNoop(Op, Data*, Data*) {
  return false;
}
template <typename F, typename... Args>
using CallableResult = decltype(std::declval<F>()(std::declval<Args>()...));

template <
    typename From,
    typename To,
    typename = typename std::enable_if<
        !std::is_reference<To>::value || std::is_reference<From>::value>::type>
using SafeResultOf = decltype(static_cast<To>(std::declval<From>()));

template <typename FunctionType>
struct FunctionTraits;
@ -280,9 +303,9 @@ struct FunctionTraits<ReturnType(Args...)> {
  using NonConstSignature = ReturnType(Args...);
  using OtherSignature = ConstSignature;

  template <typename F, typename G = typename std::decay<F>::type>
  using ResultOf = decltype(
      static_cast<ReturnType>(std::declval<G&>()(std::declval<Args>()...)));
  template <typename F>
  using ResultOf =
      SafeResultOf<CallableResult<_t<std::decay<F>>&, Args...>, ReturnType>;

  template <typename Fun>
  static ReturnType callSmall(Data& p, Args&&... args) {
@ -301,17 +324,16 @@ struct FunctionTraits<ReturnType(Args...)> {
  }

  ReturnType operator()(Args... args) {
    auto& fn = *static_cast<Function<ReturnType(Args...)>*>(this);
    auto& fn = *static_cast<Function<NonConstSignature>*>(this);
    return fn.call_(fn.data_, static_cast<Args&&>(args)...);
  }

  class SharedProxy {
    std::shared_ptr<Function<ReturnType(Args...)>> sp_;
    std::shared_ptr<Function<NonConstSignature>> sp_;

   public:
    explicit SharedProxy(Function<ReturnType(Args...)>&& func)
        : sp_(std::make_shared<Function<ReturnType(Args...)>>(
              std::move(func))) {}
    explicit SharedProxy(Function<NonConstSignature>&& func)
        : sp_(std::make_shared<Function<NonConstSignature>>(std::move(func))) {}
    ReturnType operator()(Args&&... args) const {
      return (*sp_)(static_cast<Args&&>(args)...);
    }
@ -326,9 +348,10 @@ struct FunctionTraits<ReturnType(Args...) const> {
  using NonConstSignature = ReturnType(Args...);
  using OtherSignature = NonConstSignature;

  template <typename F, typename G = typename std::decay<F>::type>
  using ResultOf = decltype(static_cast<ReturnType>(
      std::declval<const G&>()(std::declval<Args>()...)));
  template <typename F>
  using ResultOf = SafeResultOf<
      CallableResult<const _t<std::decay<F>>&, Args...>,
      ReturnType>;

  template <typename Fun>
  static ReturnType callSmall(Data& p, Args&&... args) {
@ -347,23 +370,115 @@ struct FunctionTraits<ReturnType(Args...) const> {
  }

  ReturnType operator()(Args... args) const {
    auto& fn = *static_cast<const Function<ReturnType(Args...) const>*>(this);
    auto& fn = *static_cast<const Function<ConstSignature>*>(this);
    return fn.call_(fn.data_, static_cast<Args&&>(args)...);
  }

  struct SharedProxy {
    std::shared_ptr<Function<ReturnType(Args...) const>> sp_;
  class SharedProxy {
    std::shared_ptr<Function<ConstSignature>> sp_;

   public:
    explicit SharedProxy(Function<ReturnType(Args...) const>&& func)
        : sp_(std::make_shared<Function<ReturnType(Args...) const>>(
              std::move(func))) {}
    explicit SharedProxy(Function<ConstSignature>&& func)
        : sp_(std::make_shared<Function<ConstSignature>>(std::move(func))) {}
    ReturnType operator()(Args&&... args) const {
      return (*sp_)(static_cast<Args&&>(args)...);
    }
  };
};

#if FOLLY_HAVE_NOEXCEPT_FUNCTION_TYPE
template <typename ReturnType, typename... Args>
struct FunctionTraits<ReturnType(Args...) noexcept> {
  using Call = ReturnType (*)(Data&, Args&&...) noexcept;
  using IsConst = std::false_type;
  using ConstSignature = ReturnType(Args...) const noexcept;
  using NonConstSignature = ReturnType(Args...) noexcept;
  using OtherSignature = ConstSignature;

  template <typename F>
  using ResultOf =
      SafeResultOf<CallableResult<_t<std::decay<F>>&, Args...>, ReturnType>;

  template <typename Fun>
  static ReturnType callSmall(Data& p, Args&&... args) noexcept {
    return static_cast<ReturnType>((*static_cast<Fun*>(
        static_cast<void*>(&p.tiny)))(static_cast<Args&&>(args)...));
  }

  template <typename Fun>
  static ReturnType callBig(Data& p, Args&&... args) noexcept {
    return static_cast<ReturnType>(
        (*static_cast<Fun*>(p.big))(static_cast<Args&&>(args)...));
  }

  static ReturnType uninitCall(Data&, Args&&...) noexcept {
    terminate_with<std::bad_function_call>();
  }

  ReturnType operator()(Args... args) noexcept {
    auto& fn = *static_cast<Function<NonConstSignature>*>(this);
    return fn.call_(fn.data_, static_cast<Args&&>(args)...);
  }

  class SharedProxy {
    std::shared_ptr<Function<NonConstSignature>> sp_;

   public:
    explicit SharedProxy(Function<NonConstSignature>&& func)
        : sp_(std::make_shared<Function<NonConstSignature>>(std::move(func))) {}
    ReturnType operator()(Args&&... args) const {
      return (*sp_)(static_cast<Args&&>(args)...);
    }
  };
};

template <typename ReturnType, typename... Args>
struct FunctionTraits<ReturnType(Args...) const noexcept> {
  using Call = ReturnType (*)(Data&, Args&&...) noexcept;
  using IsConst = std::true_type;
  using ConstSignature = ReturnType(Args...) const noexcept;
  using NonConstSignature = ReturnType(Args...) noexcept;
  using OtherSignature = NonConstSignature;

  template <typename F>
  using ResultOf = SafeResultOf<
      CallableResult<const _t<std::decay<F>>&, Args...>,
      ReturnType>;

  template <typename Fun>
  static ReturnType callSmall(Data& p, Args&&... args) noexcept {
    return static_cast<ReturnType>((*static_cast<const Fun*>(
        static_cast<void*>(&p.tiny)))(static_cast<Args&&>(args)...));
  }

  template <typename Fun>
  static ReturnType callBig(Data& p, Args&&... args) noexcept {
    return static_cast<ReturnType>(
        (*static_cast<const Fun*>(p.big))(static_cast<Args&&>(args)...));
  }

  static ReturnType uninitCall(Data&, Args&&...) noexcept {
    throw std::bad_function_call();
  }

  ReturnType operator()(Args... args) const noexcept {
    auto& fn = *static_cast<const Function<ConstSignature>*>(this);
    return fn.call_(fn.data_, static_cast<Args&&>(args)...);
  }

  class SharedProxy {
    std::shared_ptr<Function<ConstSignature>> sp_;

   public:
    explicit SharedProxy(Function<ConstSignature>&& func)
        : sp_(std::make_shared<Function<ConstSignature>>(std::move(func))) {}
    ReturnType operator()(Args&&... args) const {
      return (*sp_)(static_cast<Args&&>(args)...);
    }
  };
};
#endif

template <typename Fun>
bool execSmall(Op o, Data* src, Data* dst) {
  switch (o) {
@ -374,8 +489,6 @@ bool execSmall(Op o, Data* src, Data* dst) {
    case Op::NUKE:
      static_cast<Fun*>(static_cast<void*>(&src->tiny))->~Fun();
      break;
    case Op::FULL:
      return true;
    case Op::HEAP:
      break;
  }
@ -392,32 +505,15 @@ bool execBig(Op o, Data* src, Data* dst) {
    case Op::NUKE:
      delete static_cast<Fun*>(src->big);
      break;
    case Op::FULL:
    case Op::HEAP:
      break;
  }
  return true;
}

// Invoke helper
template <typename F, typename... Args>
inline auto invoke(F&& f, Args&&... args)
    -> decltype(std::forward<F>(f)(std::forward<Args>(args)...)) {
  return std::forward<F>(f)(std::forward<Args>(args)...);
}

template <typename M, typename C, typename... Args>
inline auto invoke(M(C::*d), Args&&... args)
    -> decltype(std::mem_fn(d)(std::forward<Args>(args)...)) {
  return std::mem_fn(d)(std::forward<Args>(args)...);
}

} // namespace function
} // namespace detail

FOLLY_PUSH_WARNING
FOLLY_MSVC_DISABLE_WARNING(4521) // Multiple copy constructors
FOLLY_MSVC_DISABLE_WARNING(4522) // Multiple assignment operators
template <typename FunctionType>
class Function final : private detail::function::FunctionTraits<FunctionType> {
  // These utility types are defined outside of the template to reduce
@ -436,20 +532,22 @@ class Function final : private detail::function::FunctionTraits<FunctionType> {
  template <typename Fun>
  using IsSmall = detail::function::IsSmall<Fun>;

  using OtherSignature = typename Traits::OtherSignature;

  // The `data_` member is mutable to allow `constCastFunction` to work without
  // invoking undefined behavior. Const-correctness is only violated when
  // `FunctionType` is a const function type (e.g., `int() const`) and `*this`
  // is the result of calling `constCastFunction`.
  mutable Data data_;
  mutable Data data_{};
  Call call_{&Traits::uninitCall};
  Exec exec_{&detail::function::uninitNoop};
  Exec exec_{nullptr};

  bool exec(Op o, Data* src, Data* dst) const {
    return exec_ && exec_(o, src, dst);
  }

  friend Traits;
  friend Function<typename Traits::ConstSignature> folly::constCastFunction<>(
      Function<typename Traits::NonConstSignature>&&) noexcept;
  friend class Function<OtherSignature>;
  friend class Function<typename Traits::OtherSignature>;

  template <typename Fun>
  Function(Fun&& fun, SmallTag) noexcept {
@ -469,10 +567,15 @@ class Function final : private detail::function::FunctionTraits<FunctionType> {
    exec_ = &detail::function::execBig<FunT>;
  }

  Function(Function<OtherSignature>&& that, CoerceTag) noexcept {
    that.exec_(Op::MOVE, &that.data_, &data_);
    std::swap(call_, that.call_);
    std::swap(exec_, that.exec_);
  template <typename Signature>
  Function(Function<Signature>&& that, CoerceTag)
      : Function(static_cast<Function<Signature>&&>(that), HeapTag{}) {}

  Function(Function<typename Traits::OtherSignature>&& that, CoerceTag) noexcept
      : call_(that.call_), exec_(that.exec_) {
    that.call_ = &Traits::uninitCall;
    that.exec_ = nullptr;
    exec(Op::MOVE, &that.data_, &data_);
  }

 public:
@ -482,21 +585,24 @@ class Function final : private detail::function::FunctionTraits<FunctionType> {
  Function() = default;

  // not copyable
  // NOTE: Deleting the non-const copy constructor is unusual but necessary to
  // prevent copies from non-const `Function` object from selecting the
  // perfect forwarding implicit converting constructor below
  // (i.e., `template <typename Fun> Function(Fun&&)`).
  Function(Function&) = delete;
  Function(const Function&) = delete;
  Function(const Function&&) = delete;

#if __OBJC__
  // Make sure Objective C blocks are copied
  template <class ReturnType, class... Args>
  /*implicit*/ Function(ReturnType (^objCBlock)(Args... args))
      : Function([blockCopy = (ReturnType(^)(Args...))[objCBlock copy]](
            Args... args) { return blockCopy(args...); }){};
#endif

  /**
   * Move constructor
   */
  Function(Function&& that) noexcept {
    that.exec_(Op::MOVE, &that.data_, &data_);
    std::swap(call_, that.call_);
    std::swap(exec_, that.exec_);
  Function(Function&& that) noexcept : call_(that.call_), exec_(that.exec_) {
    // that must be uninitialized before exec() call in the case of self move
    that.call_ = &Traits::uninitCall;
    that.exec_ = nullptr;
    exec(Op::MOVE, &that.data_, &data_);
  }

  /**
@ -505,32 +611,47 @@ class Function final : private detail::function::FunctionTraits<FunctionType> {
  /* implicit */ Function(std::nullptr_t) noexcept {}

  /**
   * Constructs a new `Function` from any callable object. This
   * handles function pointers, pointers to static member functions,
   * `std::reference_wrapper` objects, `std::function` objects, and arbitrary
   * objects that implement `operator()` if the parameter signature
   * matches (i.e. it returns R when called with Args...).
   * For a `Function` with a const function type, the object must be
   * callable from a const-reference, i.e. implement `operator() const`.
   * For a `Function` with a non-const function type, the object will
   * Constructs a new `Function` from any callable object that is _not_ a
   * `folly::Function`. This handles function pointers, pointers to static
   * member functions, `std::reference_wrapper` objects, `std::function`
   * objects, and arbitrary objects that implement `operator()` if the parameter
   * signature matches (i.e. it returns an object convertible to `R` when called
   * with `Args...`).
   *
   * \note `typename Traits::template ResultOf<Fun>` prevents this overload
   * from being selected by overload resolution when `fun` is not a compatible
   * function.
   *
   * \note The noexcept requires some explanation. `IsSmall` is true when the
   * decayed type fits within the internal buffer and is noexcept-movable. But
   * this ctor might copy, not move. What we need here, if this ctor does a
   * copy, is that this ctor be noexcept when the copy is noexcept. That is not
   * checked in `IsSmall`, and shouldn't be, because once the `Function` is
   * constructed, the contained object is never copied. This check is for this
   * ctor only, in the case that this ctor does a copy.
   */
  template <
      typename Fun,
      typename = detail::function::EnableIfNotFunction<Fun>,
      typename = typename Traits::template ResultOf<Fun>>
  /* implicit */ Function(Fun fun) noexcept(
      IsSmall<Fun>::value&& noexcept(Fun(std::declval<Fun>())))
      : Function(std::move(fun), IsSmall<Fun>{}) {}

  /**
   * For move-constructing from a `folly::Function<X(Ys...) [const?]>`.
   * For a `Function` with a `const` function type, the object must be
   * callable from a `const`-reference, i.e. implement `operator() const`.
   * For a `Function` with a non-`const` function type, the object will
   * be called from a non-const reference, which means that it will execute
   * a non-const `operator()` if it is defined, and falls back to
   * `operator() const` otherwise.
   *
   * \note `typename = ResultOf<Fun>` prevents this overload from being
   * selected by overload resolution when `fun` is not a compatible function.
   */
  template <class Fun, typename = typename Traits::template ResultOf<Fun>>
  /* implicit */ Function(Fun&& fun) noexcept(IsSmall<Fun>::value)
      : Function(static_cast<Fun&&>(fun), IsSmall<Fun>{}) {}
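As the notes above describe, folly::Function is move-only and so, unlike std::function, can own non-copyable callables. A sketch (assuming <folly/Function.h> and C++14 init captures):

#include <folly/Function.h>
#include <iostream>
#include <memory>

int main() {
  // A lambda capturing a unique_ptr is move-only; std::function can't hold
  // it, folly::Function can.
  auto p = std::make_unique<int>(42);
  folly::Function<int()> f = [p = std::move(p)] { return *p; };
  std::cout << f() << "\n"; // 42

  folly::Function<int()> g = std::move(f); // moves fine; copying won't compile
  std::cout << g() << "\n"; // 42
  return 0;
}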
|
||||
|
||||
/**
|
||||
* For moving a `Function<X(Ys..) const>` into a `Function<X(Ys...)>`.
|
||||
*/
|
||||
template <
|
||||
bool Const = Traits::IsConst::value,
|
||||
typename std::enable_if<!Const, int>::type = 0>
|
||||
Function(Function<OtherSignature>&& that) noexcept
|
||||
typename Signature,
|
||||
typename = typename Traits::template ResultOf<Function<Signature>>>
|
||||
Function(Function<Signature>&& that) noexcept(
|
||||
noexcept(Function(std::move(that), CoerceTag{})))
|
||||
: Function(std::move(that), CoerceTag{}) {}
|
||||
|
||||
/**
|
||||
@ -550,25 +671,40 @@ class Function final : private detail::function::FunctionTraits<FunctionType> {
|
||||
}
|
||||
|
||||
~Function() {
|
||||
exec_(Op::NUKE, &data_, nullptr);
|
||||
exec(Op::NUKE, &data_, nullptr);
|
||||
}
|
||||
|
||||
Function& operator=(Function&) = delete;
|
||||
Function& operator=(const Function&) = delete;
|
||||
|
||||
#if __OBJC__
|
||||
// Make sure Objective C blocks are copied
|
||||
template <class ReturnType, class... Args>
|
||||
/* implicit */ Function& operator=(ReturnType (^objCBlock)(Args... args)) {
|
||||
(*this) = [blockCopy = (ReturnType(^)(Args...))[objCBlock copy]](
|
||||
Args... args) { return blockCopy(args...); };
|
||||
return *this;
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Move assignment operator
|
||||
*
|
||||
* \note Leaves `that` in a valid but unspecified state. If `&that == this`
|
||||
* then `*this` is left in a valid but unspecified state.
|
||||
*/
|
||||
Function& operator=(Function&& that) noexcept {
|
||||
if (&that != this) {
|
||||
// Q: Why is is safe to destroy and reconstruct this object in place?
|
||||
      // A: Two reasons: First, `Function` is a final class, so in doing this
      // we aren't slicing off any derived parts. And second, the move
      // operation is guaranteed not to throw so we always leave the object
      // in a valid state.
      this->~Function();
      ::new (this) Function(std::move(that));
    }
    // Q: Why is it safe to destroy and reconstruct this object in place?
    // A: Two reasons: First, `Function` is a final class, so in doing this
    // we aren't slicing off any derived parts. And second, the move
    // operation is guaranteed not to throw so we always leave the object
    // in a valid state.
    // In the case of self-move (this == &that), this leaves the object in
    // a default-constructed state. First the object is destroyed, then we
    // pass the destroyed object to the move constructor. The first thing the
    // move constructor does is default-construct the object. That object is
    // "moved" into itself, which is a no-op for a default-constructed Function.
    this->~Function();
    ::new (this) Function(std::move(that));
    return *this;
  }

@@ -576,25 +712,37 @@ class Function final : private detail::function::FunctionTraits<FunctionType> {
   * Assigns a callable object to this `Function`. If the operation fails,
   * `*this` is left unmodified.
   *
   * \note `typename = ResultOf<Fun>` prevents this overload from being
   * selected by overload resolution when `fun` is not a compatible function.
   * \note `typename = decltype(Function(std::declval<Fun>()))` prevents this
   * overload from being selected by overload resolution when `fun` is not a
   * compatible function.
   */
  template <class Fun, typename = typename Traits::template ResultOf<Fun>>
  Function& operator=(Fun&& fun) noexcept(
  template <typename Fun, typename = decltype(Function(std::declval<Fun>()))>
  Function& operator=(Fun fun) noexcept(
      noexcept(/* implicit */ Function(std::declval<Fun>()))) {
    // Doing this in place is more efficient when we can do so safely.
    if (noexcept(/* implicit */ Function(std::declval<Fun>()))) {
      // Q: Why is it safe to destroy and reconstruct this object in place?
      // A: See the explanation in the move assignment operator.
      this->~Function();
      ::new (this) Function(static_cast<Fun&&>(fun));
      ::new (this) Function(std::move(fun));
    } else {
      // Construct a temporary and (nothrow) swap.
      Function(static_cast<Fun&&>(fun)).swap(*this);
      Function(std::move(fun)).swap(*this);
    }
    return *this;
  }

  /**
   * For assigning from a `Function<X(Ys...) [const?]>`.
   */
  template <
      typename Signature,
      typename = typename Traits::template ResultOf<Function<Signature>>>
  Function& operator=(Function<Signature>&& that) noexcept(
      noexcept(Function(std::move(that)))) {
    return (*this = Function(std::move(that)));
  }

  /**
   * Clears this `Function`.
   */
@@ -631,7 +779,7 @@ class Function final : private detail::function::FunctionTraits<FunctionType> {
   * non-empty.
   */
  explicit operator bool() const noexcept {
    return exec_(Op::FULL, nullptr, nullptr);
    return exec_ != nullptr;
  }

  /**
@@ -641,7 +789,7 @@ class Function final : private detail::function::FunctionTraits<FunctionType> {
   * object itself.
   */
  bool hasAllocatedMemory() const noexcept {
    return exec_(Op::HEAP, nullptr, nullptr);
    return exec(Op::HEAP, nullptr, nullptr);
  }

  using typename Traits::SharedProxy;
@@ -663,7 +811,6 @@ class Function final : private detail::function::FunctionTraits<FunctionType> {
    return std::move(*this).asSharedProxy();
  }
};
FOLLY_POP_WARNING

template <typename FunctionType>
void swap(Function<FunctionType>& lhs, Function<FunctionType>& rhs) noexcept {
@@ -707,6 +854,21 @@ Function<ReturnType(Args...) const> constCastFunction(
  return std::move(that);
}

#if FOLLY_HAVE_NOEXCEPT_FUNCTION_TYPE
template <typename ReturnType, typename... Args>
Function<ReturnType(Args...) const noexcept> constCastFunction(
    Function<ReturnType(Args...) noexcept>&& that) noexcept {
  return Function<ReturnType(Args...) const noexcept>{
      std::move(that), detail::function::CoerceTag{}};
}

template <typename ReturnType, typename... Args>
Function<ReturnType(Args...) const noexcept> constCastFunction(
    Function<ReturnType(Args...) const noexcept>&& that) noexcept {
  return std::move(that);
}
#endif

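A minimal usage sketch of the noexcept-aware constCastFunction overloads added above (illustrative, not from the diff; assumes <folly/Function.h> and a toolchain where noexcept function types are supported):

    folly::Function<int() noexcept> f = []() noexcept { return 42; };
    // Coerce into the const-callable signature; the noexcept qualifier is kept.
    folly::Function<int() const noexcept> g =
        folly::constCastFunction(std::move(f));
    int v = g(); // v == 42
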
/**
 * @class FunctionRef
 *
@@ -734,19 +896,21 @@ template <typename ReturnType, typename... Args>
class FunctionRef<ReturnType(Args...)> final {
  using Call = ReturnType (*)(void*, Args&&...);

  void* object_{nullptr};
  Call call_{&FunctionRef::uninitCall};

  static ReturnType uninitCall(void*, Args&&...) {
    throw std::bad_function_call();
  }

  template <typename Fun>
  static ReturnType call(void* object, Args&&... args) {
    return static_cast<ReturnType>(detail::function::invoke(
        *static_cast<Fun*>(object), static_cast<Args&&>(args)...));
    using Pointer = _t<std::add_pointer<Fun>>;
    return static_cast<ReturnType>(invoke(
        static_cast<Fun&&>(*static_cast<Pointer>(object)),
        static_cast<Args&&>(args)...));
  }

  void* object_{nullptr};
  Call call_{&FunctionRef::uninitCall};

 public:
  /**
   * Default constructor. Constructs an empty FunctionRef.
@@ -758,28 +922,29 @@ class FunctionRef<ReturnType(Args...)> final {
  /**
   * Construct a FunctionRef from a reference to a callable object.
   */
  template <typename Fun>
  /* implicit */ FunctionRef(Fun&& fun) noexcept {
    using ReferencedType = typename std::remove_reference<Fun>::type;

    static_assert(
        std::is_convertible<
            typename std::result_of<ReferencedType&(Args && ...)>::type,
            ReturnType>::value,
        "FunctionRef cannot be constructed from object with "
        "incompatible function signature");

    // `Fun` may be a const type, in which case we have to do a const_cast
    // to store the address in a `void*`. This is safe because the `void*`
    // will be cast back to `Fun*` (which is a const pointer whenever `Fun`
    // is a const type) inside `FunctionRef::call`
    object_ = const_cast<void*>(static_cast<void const*>(std::addressof(fun)));
    call_ = &FunctionRef::call<ReferencedType>;
  }
  template <
      typename Fun,
      typename std::enable_if<
          Conjunction<
              Negation<std::is_same<FunctionRef, _t<std::decay<Fun>>>>,
              is_invocable_r<ReturnType, Fun&&, Args&&...>>::value,
          int>::type = 0>
  constexpr /* implicit */ FunctionRef(Fun&& fun) noexcept
      // `Fun` may be a const type, in which case we have to do a const_cast
      // to store the address in a `void*`. This is safe because the `void*`
      // will be cast back to `Fun*` (which is a const pointer whenever `Fun`
      // is a const type) inside `FunctionRef::call`
      : object_(
            const_cast<void*>(static_cast<void const*>(std::addressof(fun)))),
        call_(&FunctionRef::call<Fun>) {}

  ReturnType operator()(Args... args) const {
    return call_(object_, static_cast<Args&&>(args)...);
  }

  constexpr explicit operator bool() const {
    return object_;
  }
};

} // namespace folly

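A short sketch of the non-owning FunctionRef in use (illustrative, not taken from the diff): because FunctionRef neither copies nor owns the callable, it suits parameters that are only invoked for the duration of the call.

    #include <folly/Function.h>

    // Accepts any callable int(int) without allocating or copying it.
    int apply(folly::FunctionRef<int(int)> f, int x) {
      return f(x);
    }

    int twice(int x) { return 2 * x; }
    int a = apply(twice, 21);                       // 42, binds a function pointer
    int b = apply([](int x) { return x + 1; }, 41); // 42, binds a lambda
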
59 ios/Pods/Folly/folly/GLog.h generated Normal file
@@ -0,0 +1,59 @@
/*
 * Copyright 2012-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <atomic>
#include <chrono>

#include <glog/logging.h>

#ifndef FB_LOG_EVERY_MS
/**
 * Issues a LOG(severity) no more often than every milli_interval
 * milliseconds. Example:
 *
 *   FB_LOG_EVERY_MS(INFO, 10000) << "At least ten seconds passed"
 *     " since you last saw this.";
 *
 * The implementation uses for statements to introduce variables in
 * a nice way that doesn't mess surrounding statements. It is thread
 * safe. Non-positive intervals will always log.
 */
#define FB_LOG_EVERY_MS(severity, milli_interval) \
  for (decltype(milli_interval) FB_LEM_once = 1, \
       FB_LEM_interval = (milli_interval); \
       FB_LEM_once;) \
    for (::std::chrono::milliseconds::rep FB_LEM_prev, \
         FB_LEM_now = FB_LEM_interval <= 0 \
             ? 0 \
             : ::std::chrono::duration_cast<::std::chrono::milliseconds>( \
                   ::std::chrono::system_clock::now().time_since_epoch()) \
                   .count(); \
         FB_LEM_once;) \
      for (static ::std::atomic<::std::chrono::milliseconds::rep> FB_LEM_hist; \
           FB_LEM_once; \
           FB_LEM_once = 0) \
        if (FB_LEM_interval > 0 && \
            (FB_LEM_now - \
                 (FB_LEM_prev = \
                      FB_LEM_hist.load(std::memory_order_acquire)) < \
             FB_LEM_interval || \
             !FB_LEM_hist.compare_exchange_strong(FB_LEM_prev, FB_LEM_now))) { \
        } else \
          LOG(severity)

#endif
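A hedged usage sketch for the rate-limited logging macro above (the loop and message are invented for the example):

    // Logs at most once per second no matter how hot the loop is;
    // non-positive intervals would log on every pass.
    for (int i = 0; i < 1000000; ++i) {
      FB_LOG_EVERY_MS(INFO, 1000) << "processed " << i << " items";
    }
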
264 ios/Pods/Folly/folly/GroupVarint.h generated
@@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2012-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -16,37 +16,39 @@

#pragma once

#include <cstdint>
#include <limits>

#include <glog/logging.h>

#if !defined(__GNUC__) && !defined(_MSC_VER)
#error GroupVarint.h requires GCC or MSVC
#endif

#include <folly/Portability.h>

#if FOLLY_X64 || defined(__i386__) || FOLLY_PPC64 || FOLLY_A64
#if FOLLY_X64 || defined(__i386__) || FOLLY_PPC64 || FOLLY_AARCH64
#define HAVE_GROUP_VARINT 1

#include <cstdint>
#include <limits>
#include <folly/detail/GroupVarintDetail.h>
#include <folly/Bits.h>
#include <folly/Range.h>
#include <folly/detail/GroupVarintDetail.h>
#include <folly/lang/Bits.h>
#include <folly/portability/Builtins.h>
#include <glog/logging.h>

#if FOLLY_SSE >= 3
#include <nmmintrin.h>
namespace folly {
namespace detail {
alignas(16) extern const uint64_t groupVarintSSEMasks[];
} // namespace detail
} // namespace folly
extern const std::array<std::array<std::uint32_t, 4>, 256> groupVarintSSEMasks;
} // namespace detail
} // namespace folly
#endif

namespace folly {
namespace detail {
extern const uint8_t groupVarintLengths[];
} // namespace detail
} // namespace folly
extern const std::array<std::uint8_t, 256> groupVarintLengths;
} // namespace detail
} // namespace folly

namespace folly {

@@ -66,7 +68,6 @@ class GroupVarint;
template <>
class GroupVarint<uint32_t> : public detail::GroupVarintBase<uint32_t> {
 public:

  /**
   * Return the number of bytes used to encode these four values.
   */
@@ -102,16 +103,24 @@ class GroupVarint<uint32_t> : public detail::GroupVarintBase<uint32_t> {
   * buffer of size bytes.
   */
  static size_t partialCount(const char* p, size_t size) {
    char v = *p;
    uint8_t v = uint8_t(*p);
    size_t s = kHeaderSize;
    s += 1 + b0key(v);
    if (s > size) return 0;
    if (s > size) {
      return 0;
    }
    s += 1 + b1key(v);
    if (s > size) return 1;
    if (s > size) {
      return 1;
    }
    s += 1 + b2key(v);
    if (s > size) return 2;
    if (s > size) {
      return 2;
    }
    s += 1 + b3key(v);
    if (s > size) return 3;
    if (s > size) {
      return 3;
    }
    return 4;
  }

@@ -120,8 +129,8 @@ class GroupVarint<uint32_t> : public detail::GroupVarintBase<uint32_t> {
   * return the number of bytes used by the encoding.
   */
  static size_t encodedSize(const char* p) {
    return (kHeaderSize + kGroupSize +
            b0key(*p) + b1key(*p) + b2key(*p) + b3key(*p));
    return kHeaderSize + kGroupSize + b0key(uint8_t(*p)) + b1key(uint8_t(*p)) +
        b2key(uint8_t(*p)) + b3key(uint8_t(*p));
  }

  /**
@@ -136,13 +145,13 @@ class GroupVarint<uint32_t> : public detail::GroupVarintBase<uint32_t> {
    uint8_t b3key = key(d);
    *p++ = (b3key << 6) | (b2key << 4) | (b1key << 2) | b0key;
    storeUnaligned(p, a);
    p += b0key+1;
    p += b0key + 1;
    storeUnaligned(p, b);
    p += b1key+1;
    p += b1key + 1;
    storeUnaligned(p, c);
    p += b2key+1;
    p += b2key + 1;
    storeUnaligned(p, d);
    p += b3key+1;
    p += b3key + 1;
    return p;
  }

@@ -160,20 +169,24 @@ class GroupVarint<uint32_t> : public detail::GroupVarintBase<uint32_t> {
   * The buffer needs to have at least 3 extra bytes available (they
   * may be read but ignored).
   */
  static const char* decode_simple(const char* p, uint32_t* a, uint32_t* b,
                                   uint32_t* c, uint32_t* d) {
  static const char* decode_simple(
      const char* p,
      uint32_t* a,
      uint32_t* b,
      uint32_t* c,
      uint32_t* d) {
    size_t k = loadUnaligned<uint8_t>(p);
    const char* end = p + detail::groupVarintLengths[k];
    ++p;
    size_t k0 = b0key(k);
    *a = loadUnaligned<uint32_t>(p) & kMask[k0];
    p += k0+1;
    p += k0 + 1;
    size_t k1 = b1key(k);
    *b = loadUnaligned<uint32_t>(p) & kMask[k1];
    p += k1+1;
    p += k1 + 1;
    size_t k2 = b2key(k);
    *c = loadUnaligned<uint32_t>(p) & kMask[k2];
    p += k2+1;
    p += k2 + 1;
    size_t k3 = b3key(k);
    *d = loadUnaligned<uint32_t>(p) & kMask[k3];
    // p += k3+1;
@@ -185,7 +198,7 @@ class GroupVarint<uint32_t> : public detail::GroupVarintBase<uint32_t> {
   * pointed-to by dest, similar to decode(p,a,b,c,d) above.
   */
  static const char* decode_simple(const char* p, uint32_t* dest) {
    return decode_simple(p, dest, dest+1, dest+2, dest+3);
    return decode_simple(p, dest, dest + 1, dest + 2, dest + 3);
  }

#if FOLLY_SSE >= 3
@@ -194,10 +207,10 @@ class GroupVarint<uint32_t> : public detail::GroupVarintBase<uint32_t> {
   * that we must be able to read at least 17 bytes from the input pointer, p.
   */
  static const char* decode(const char* p, uint32_t* dest) {
    uint8_t key = p[0];
    __m128i val = _mm_loadu_si128((const __m128i*)(p+1));
    uint8_t key = uint8_t(p[0]);
    __m128i val = _mm_loadu_si128((const __m128i*)(p + 1));
    __m128i mask =
        _mm_load_si128((const __m128i*)&detail::groupVarintSSEMasks[key * 2]);
        _mm_load_si128((const __m128i*)detail::groupVarintSSEMasks[key].data());
    __m128i r = _mm_shuffle_epi8(val, mask);
    _mm_storeu_si128((__m128i*)dest, r);
    return p + detail::groupVarintLengths[key];
@@ -207,55 +220,62 @@ class GroupVarint<uint32_t> : public detail::GroupVarintBase<uint32_t> {
   * Just like decode_simple, but with the additional constraint that
   * we must be able to read at least 17 bytes from the input pointer, p.
   */
  static const char* decode(const char* p, uint32_t* a, uint32_t* b,
                            uint32_t* c, uint32_t* d) {
    uint8_t key = p[0];
    __m128i val = _mm_loadu_si128((const __m128i*)(p+1));
  static const char*
  decode(const char* p, uint32_t* a, uint32_t* b, uint32_t* c, uint32_t* d) {
    uint8_t key = uint8_t(p[0]);
    __m128i val = _mm_loadu_si128((const __m128i*)(p + 1));
    __m128i mask =
        _mm_load_si128((const __m128i*)&detail::groupVarintSSEMasks[key * 2]);
        _mm_load_si128((const __m128i*)detail::groupVarintSSEMasks[key].data());
    __m128i r = _mm_shuffle_epi8(val, mask);

    // Extracting 32 bits at a time out of an XMM register is a SSE4 feature
#if FOLLY_SSE >= 4
    *a = _mm_extract_epi32(r, 0);
    *b = _mm_extract_epi32(r, 1);
    *c = _mm_extract_epi32(r, 2);
    *d = _mm_extract_epi32(r, 3);
#else /* !__SSE4__ */
    *a = uint32_t(_mm_extract_epi32(r, 0));
    *b = uint32_t(_mm_extract_epi32(r, 1));
    *c = uint32_t(_mm_extract_epi32(r, 2));
    *d = uint32_t(_mm_extract_epi32(r, 3));
#else /* !__SSE4__ */
    *a = _mm_extract_epi16(r, 0) + (_mm_extract_epi16(r, 1) << 16);
    *b = _mm_extract_epi16(r, 2) + (_mm_extract_epi16(r, 3) << 16);
    *c = _mm_extract_epi16(r, 4) + (_mm_extract_epi16(r, 5) << 16);
    *d = _mm_extract_epi16(r, 6) + (_mm_extract_epi16(r, 7) << 16);
#endif /* __SSE4__ */
#endif /* __SSE4__ */

    return p + detail::groupVarintLengths[key];
  }

#else /* !__SSSE3__ */
  static const char* decode(const char* p, uint32_t* a, uint32_t* b,
                            uint32_t* c, uint32_t* d) {
#else /* !__SSSE3__ */
  static const char*
  decode(const char* p, uint32_t* a, uint32_t* b, uint32_t* c, uint32_t* d) {
    return decode_simple(p, a, b, c, d);
  }

  static const char* decode(const char* p, uint32_t* dest) {
    return decode_simple(p, dest);
  }
#endif /* __SSSE3__ */
#endif /* __SSSE3__ */

 private:
  static uint8_t key(uint32_t x) {
    // __builtin_clz is undefined for the x==0 case
    return 3 - (__builtin_clz(x|1) / 8);
    return uint8_t(3 - (__builtin_clz(x | 1) / 8));
  }
  static size_t b0key(size_t x) {
    return x & 3;
  }
  static size_t b1key(size_t x) {
    return (x >> 2) & 3;
  }
  static size_t b2key(size_t x) {
    return (x >> 4) & 3;
  }
  static size_t b3key(size_t x) {
    return (x >> 6) & 3;
  }
  static size_t b0key(size_t x) { return x & 3; }
  static size_t b1key(size_t x) { return (x >> 2) & 3; }
  static size_t b2key(size_t x) { return (x >> 4) & 3; }
  static size_t b3key(size_t x) { return (x >> 6) & 3; }

  static const uint32_t kMask[];
};

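A quick roundtrip sketch for the 32-bit API above (illustrative; the buffer is deliberately generous to satisfy the documented slack requirements, and the encode signature is assumed from the method body shown in this diff):

    char buf[32];
    // Encode four values into one group: 1 key byte + 1..4 bytes per value.
    char* end = folly::GroupVarint<uint32_t>::encode(buf, 1, 300, 70000, 1u << 30);
    uint32_t out[4];
    // decode_simple may read up to 3 bytes past the encoded data.
    folly::GroupVarint<uint32_t>::decode_simple(buf, out, out + 1, out + 2, out + 3);
    // out is now {1, 300, 70000, 1 << 30}; end - buf is the encoded size.
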
/**
 * GroupVarint encoding for 64-bit values.
 *
@@ -272,10 +292,10 @@ class GroupVarint<uint64_t> : public detail::GroupVarintBase<uint64_t> {
  /**
   * Return the number of bytes used to encode these five values.
   */
  static size_t size(uint64_t a, uint64_t b, uint64_t c, uint64_t d,
                     uint64_t e) {
    return (kHeaderSize + kGroupSize +
            key(a) + key(b) + key(c) + key(d) + key(e));
  static size_t
  size(uint64_t a, uint64_t b, uint64_t c, uint64_t d, uint64_t e) {
    return kHeaderSize + kGroupSize + key(a) + key(b) + key(c) + key(d) +
        key(e);
  }

  /**
@@ -309,15 +329,25 @@ class GroupVarint<uint64_t> : public detail::GroupVarintBase<uint64_t> {
    uint16_t v = loadUnaligned<uint16_t>(p);
    size_t s = kHeaderSize;
    s += 1 + b0key(v);
    if (s > size) return 0;
    if (s > size) {
      return 0;
    }
    s += 1 + b1key(v);
    if (s > size) return 1;
    if (s > size) {
      return 1;
    }
    s += 1 + b2key(v);
    if (s > size) return 2;
    if (s > size) {
      return 2;
    }
    s += 1 + b3key(v);
    if (s > size) return 3;
    if (s > size) {
      return 3;
    }
    s += 1 + b4key(v);
    if (s > size) return 4;
    if (s > size) {
      return 4;
    }
    return 5;
  }

@@ -327,8 +357,8 @@ class GroupVarint<uint64_t> : public detail::GroupVarintBase<uint64_t> {
   */
  static size_t encodedSize(const char* p) {
    uint16_t n = loadUnaligned<uint16_t>(p);
    return (kHeaderSize + kGroupSize +
            b0key(n) + b1key(n) + b2key(n) + b3key(n) + b4key(n));
    return kHeaderSize + kGroupSize + b0key(n) + b1key(n) + b2key(n) +
        b3key(n) + b4key(n);
  }

  /**
@@ -336,27 +366,29 @@ class GroupVarint<uint64_t> : public detail::GroupVarintBase<uint64_t> {
   * the next position in the buffer (that is, one character past the last
   * encoded byte). p needs to have at least size()+8 bytes available.
   */
  static char* encode(char* p, uint64_t a, uint64_t b, uint64_t c,
                      uint64_t d, uint64_t e) {
    uint8_t b0key = key(a);
    uint8_t b1key = key(b);
    uint8_t b2key = key(c);
    uint8_t b3key = key(d);
    uint8_t b4key = key(e);
  static char*
  encode(char* p, uint64_t a, uint64_t b, uint64_t c, uint64_t d, uint64_t e) {
    uint16_t b0key = key(a);
    uint16_t b1key = key(b);
    uint16_t b2key = key(c);
    uint16_t b3key = key(d);
    uint16_t b4key = key(e);
    storeUnaligned<uint16_t>(
        p,
        (b4key << 12) | (b3key << 9) | (b2key << 6) | (b1key << 3) | b0key);
        uint16_t(
            (b4key << 12) | (b3key << 9) | (b2key << 6) | (b1key << 3) |
            b0key));
    p += 2;
    storeUnaligned(p, a);
    p += b0key+1;
    p += b0key + 1;
    storeUnaligned(p, b);
    p += b1key+1;
    p += b1key + 1;
    storeUnaligned(p, c);
    p += b2key+1;
    p += b2key + 1;
    storeUnaligned(p, d);
    p += b3key+1;
    p += b3key + 1;
    storeUnaligned(p, e);
    p += b4key+1;
    p += b4key + 1;
    return p;
  }

@@ -374,25 +406,30 @@ class GroupVarint<uint64_t> : public detail::GroupVarintBase<uint64_t> {
   * The buffer needs to have at least 7 bytes available (they may be read
   * but ignored).
   */
  static const char* decode(const char* p, uint64_t* a, uint64_t* b,
                            uint64_t* c, uint64_t* d, uint64_t* e) {
  static const char* decode(
      const char* p,
      uint64_t* a,
      uint64_t* b,
      uint64_t* c,
      uint64_t* d,
      uint64_t* e) {
    uint16_t k = loadUnaligned<uint16_t>(p);
    p += 2;
    uint8_t k0 = b0key(k);
    *a = loadUnaligned<uint64_t>(p) & kMask[k0];
    p += k0+1;
    p += k0 + 1;
    uint8_t k1 = b1key(k);
    *b = loadUnaligned<uint64_t>(p) & kMask[k1];
    p += k1+1;
    p += k1 + 1;
    uint8_t k2 = b2key(k);
    *c = loadUnaligned<uint64_t>(p) & kMask[k2];
    p += k2+1;
    p += k2 + 1;
    uint8_t k3 = b3key(k);
    *d = loadUnaligned<uint64_t>(p) & kMask[k3];
    p += k3+1;
    p += k3 + 1;
    uint8_t k4 = b4key(k);
    *e = loadUnaligned<uint64_t>(p) & kMask[k4];
    p += k4+1;
    p += k4 + 1;
    return p;
  }

@@ -401,7 +438,7 @@ class GroupVarint<uint64_t> : public detail::GroupVarintBase<uint64_t> {
   * pointed-to by dest, similar to decode(p,a,b,c,d,e) above.
   */
  static const char* decode(const char* p, uint64_t* dest) {
    return decode(p, dest, dest+1, dest+2, dest+3, dest+4);
    return decode(p, dest, dest + 1, dest + 2, dest + 3, dest + 4);
  }

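The 64-bit variant follows the same pattern with a two-byte header describing five values; a minimal sketch (illustrative, buffer sizing honors the size()+8 and 7-byte-slack notes above):

    char buf[64];
    char* end = folly::GroupVarint<uint64_t>::encode(
        buf, 1, 1ULL << 20, 1ULL << 40, 1ULL << 56, 5);
    uint64_t out[5];
    folly::GroupVarint<uint64_t>::decode(buf, out); // fills out[0..4]
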
 private:
@@ -409,14 +446,24 @@ class GroupVarint<uint64_t> : public detail::GroupVarintBase<uint64_t> {

  static uint8_t key(uint64_t x) {
    // __builtin_clzll is undefined for the x==0 case
    return 7 - (__builtin_clzll(x|1) / 8);
    return uint8_t(7 - (__builtin_clzll(x | 1) / 8));
  }

  static uint8_t b0key(uint16_t x) { return x & 7; }
  static uint8_t b1key(uint16_t x) { return (x >> 3) & 7; }
  static uint8_t b2key(uint16_t x) { return (x >> 6) & 7; }
  static uint8_t b3key(uint16_t x) { return (x >> 9) & 7; }
  static uint8_t b4key(uint16_t x) { return (x >> 12) & 7; }
  static uint8_t b0key(uint16_t x) {
    return x & 7u;
  }
  static uint8_t b1key(uint16_t x) {
    return (x >> 3) & 7u;
  }
  static uint8_t b2key(uint16_t x) {
    return (x >> 6) & 7u;
  }
  static uint8_t b3key(uint16_t x) {
    return (x >> 9) & 7u;
  }
  static uint8_t b4key(uint16_t x) {
    return (x >> 12) & 7u;
  }

  static const uint64_t kMask[];
};
@@ -438,10 +485,7 @@ class GroupVarintEncoder {
  typedef GroupVarint<T> Base;
  typedef T type;

  explicit GroupVarintEncoder(Output out)
    : out_(out),
      count_(0) {
  }
  explicit GroupVarintEncoder(Output out) : out_(out), count_(0) {}

  ~GroupVarintEncoder() {
    finish();
@@ -516,16 +560,14 @@ class GroupVarintDecoder {

  GroupVarintDecoder() = default;

  explicit GroupVarintDecoder(StringPiece data,
                              size_t maxCount = (size_t)-1)
    : rrest_(data.end()),
      p_(data.data()),
      end_(data.end()),
      limit_(end_),
      pos_(0),
      count_(0),
      remaining_(maxCount) {
  }
  explicit GroupVarintDecoder(StringPiece data, size_t maxCount = (size_t)-1)
      : rrest_(data.end()),
        p_(data.data()),
        end_(data.end()),
        limit_(end_),
        pos_(0),
        count_(0),
        remaining_(maxCount) {}

  void reset(StringPiece data, size_t maxCount = (size_t)-1) {
    rrest_ = data.end();
@@ -543,7 +585,7 @@ class GroupVarintDecoder {
  bool next(type* val) {
    if (pos_ == count_) {
      // refill
      size_t rem = end_ - p_;
      size_t rem = size_t(end_ - p_);
      if (rem == 0 || remaining_ == 0) {
        return false;
      }
@@ -575,7 +617,7 @@ class GroupVarintDecoder {
      }
    } else {
      // Can't decode a full group
      count_ = Base::partialCount(p_, end_ - p_);
      count_ = Base::partialCount(p_, size_t(end_ - p_));
      if (remaining_ >= count_) {
        remaining_ -= count_;
        p_ = end_;
@@ -598,7 +640,7 @@ class GroupVarintDecoder {
    CHECK(pos_ == count_ && (p_ == end_ || remaining_ == 0));
    // p_ may point to the internal buffer (tmp_), but we want
    // to return subpiece of the original data
    size_t size = end_ - p_;
    size_t size = size_t(end_ - p_);
    return StringPiece(rrest_ - size, rrest_);
  }

@@ -617,6 +659,6 @@ class GroupVarintDecoder {
typedef GroupVarintDecoder<uint32_t> GroupVarint32Decoder;
typedef GroupVarintDecoder<uint64_t> GroupVarint64Decoder;

} // namespace folly
} // namespace folly

#endif /* FOLLY_X64 || defined(__i386__) || FOLLY_PPC64 */
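A consumption sketch for the pull-style decoder typedefs above (illustrative; `data` stands in for a buffer produced elsewhere by a GroupVarint32 encoder):

    std::string data; // filled elsewhere by a GroupVarint32 encoder
    folly::GroupVarint32Decoder dec(folly::StringPiece(data));
    uint32_t val;
    while (dec.next(&val)) {
      // consume val; next() returns false once input (or maxCount) runs out
    }
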
447 ios/Pods/Folly/folly/Hash.h generated
@@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2011-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -16,446 +16,5 @@

#pragma once

#include <cstdint>
#include <cstring>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>

#include <folly/ApplyTuple.h>
#include <folly/Bits.h>
#include <folly/SpookyHashV1.h>
#include <folly/SpookyHashV2.h>

/*
 * Various hashing functions.
 */

namespace folly { namespace hash {

// This is a general-purpose way to create a single hash from multiple
// hashable objects. hash_combine_generic takes a class Hasher implementing
// hash<T>; hash_combine uses a default hasher StdHasher that uses std::hash.
// hash_combine_generic hashes each argument and combines those hashes in
// an order-dependent way to yield a new hash.


// This is the Hash128to64 function from Google's cityhash (available
// under the MIT License). We use it to reduce multiple 64 bit hashes
// into a single hash.
inline uint64_t hash_128_to_64(const uint64_t upper, const uint64_t lower) {
  // Murmur-inspired hashing.
  const uint64_t kMul = 0x9ddfea08eb382d69ULL;
  uint64_t a = (lower ^ upper) * kMul;
  a ^= (a >> 47);
  uint64_t b = (upper ^ a) * kMul;
  b ^= (b >> 47);
  b *= kMul;
  return b;
}

// Never used, but gcc demands it.
template <class Hasher>
inline size_t hash_combine_generic() {
  return 0;
}

template <
    class Iter,
    class Hash = std::hash<typename std::iterator_traits<Iter>::value_type>>
uint64_t hash_range(Iter begin,
                    Iter end,
                    uint64_t hash = 0,
                    Hash hasher = Hash()) {
  for (; begin != end; ++begin) {
    hash = hash_128_to_64(hash, hasher(*begin));
  }
  return hash;
}

inline uint32_t twang_32from64(uint64_t key);

template <class Hasher, typename T, typename... Ts>
size_t hash_combine_generic(const T& t, const Ts&... ts) {
  size_t seed = Hasher::hash(t);
  if (sizeof...(ts) == 0) {
    return seed;
  }
  size_t remainder = hash_combine_generic<Hasher>(ts...);
  /* static */ if (sizeof(size_t) == sizeof(uint32_t)) {
    return twang_32from64((uint64_t(seed) << 32) | remainder);
  } else {
    return static_cast<size_t>(hash_128_to_64(seed, remainder));
  }
}

// Simply uses std::hash to hash. Note that std::hash is not guaranteed
// to be a very good hash function; provided std::hash doesn't collide on
// the individual inputs, you are fine, but that won't be true for, say,
// strings or pairs
class StdHasher {
 public:
  template <typename T>
  static size_t hash(const T& t) {
    return std::hash<T>()(t);
  }
};

template <typename T, typename... Ts>
size_t hash_combine(const T& t, const Ts&... ts) {
  return hash_combine_generic<StdHasher>(t, ts...);
}

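A small sketch of combining and range-hashing in practice (illustrative field names; both calls reduce multiple hashes order-dependently via hash_128_to_64):

    struct Point { int x; int y; };
    Point p{3, 4};
    size_t h = folly::hash::hash_combine(p.x, p.y); // one hash for both fields

    std::vector<int> v{1, 2, 3};
    uint64_t hr = folly::hash::hash_range(v.begin(), v.end());
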
//////////////////////////////////////////////////////////////////////

/*
 * Thomas Wang 64 bit mix hash function
 */

inline uint64_t twang_mix64(uint64_t key) {
  key = (~key) + (key << 21); // key *= (1 << 21) - 1; key -= 1;
  key = key ^ (key >> 24);
  key = key + (key << 3) + (key << 8); // key *= 1 + (1 << 3) + (1 << 8)
  key = key ^ (key >> 14);
  key = key + (key << 2) + (key << 4); // key *= 1 + (1 << 2) + (1 << 4)
  key = key ^ (key >> 28);
  key = key + (key << 31); // key *= 1 + (1 << 31)
  return key;
}

/*
 * Inverse of twang_mix64
 *
 * Note that twang_unmix64 is significantly slower than twang_mix64.
 */

inline uint64_t twang_unmix64(uint64_t key) {
  // See the comments in jenkins_rev_unmix32 for an explanation as to how this
  // was generated
  key *= 4611686016279904257U;
  key ^= (key >> 28) ^ (key >> 56);
  key *= 14933078535860113213U;
  key ^= (key >> 14) ^ (key >> 28) ^ (key >> 42) ^ (key >> 56);
  key *= 15244667743933553977U;
  key ^= (key >> 24) ^ (key >> 48);
  key = (key + 1) * 9223367638806167551U;
  return key;
}

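Because the Wang mix is a bijection on 64-bit values, mixing and unmixing round-trips exactly; a one-line sanity sketch (illustrative):

    uint64_t k = 0x123456789abcdef0ULL;
    assert(folly::hash::twang_unmix64(folly::hash::twang_mix64(k)) == k);
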
/*
 * Thomas Wang downscaling hash function
 */

inline uint32_t twang_32from64(uint64_t key) {
  key = (~key) + (key << 18);
  key = key ^ (key >> 31);
  key = key * 21;
  key = key ^ (key >> 11);
  key = key + (key << 6);
  key = key ^ (key >> 22);
  return (uint32_t) key;
}

/*
 * Robert Jenkins' reversible 32 bit mix hash function
 */

inline uint32_t jenkins_rev_mix32(uint32_t key) {
  key += (key << 12); // key *= (1 + (1 << 12))
  key ^= (key >> 22);
  key += (key << 4); // key *= (1 + (1 << 4))
  key ^= (key >> 9);
  key += (key << 10); // key *= (1 + (1 << 10))
  key ^= (key >> 2);
  // key *= (1 + (1 << 7)) * (1 + (1 << 12))
  key += (key << 7);
  key += (key << 12);
  return key;
}

/*
 * Inverse of jenkins_rev_mix32
 *
 * Note that jenkins_rev_unmix32 is significantly slower than
 * jenkins_rev_mix32.
 */

inline uint32_t jenkins_rev_unmix32(uint32_t key) {
  // These are the modular multiplicative inverses (in Z_2^32) of the
  // multiplication factors in jenkins_rev_mix32, in reverse order. They were
  // computed using the Extended Euclidean algorithm, see
  // http://en.wikipedia.org/wiki/Modular_multiplicative_inverse
  key *= 2364026753U;

  // The inverse of a ^= (a >> n) is
  // b = a
  // for (int i = n; i < 32; i += n) {
  //   b ^= (a >> i);
  // }
  key ^=
      (key >> 2) ^ (key >> 4) ^ (key >> 6) ^ (key >> 8) ^
      (key >> 10) ^ (key >> 12) ^ (key >> 14) ^ (key >> 16) ^
      (key >> 18) ^ (key >> 20) ^ (key >> 22) ^ (key >> 24) ^
      (key >> 26) ^ (key >> 28) ^ (key >> 30);
  key *= 3222273025U;
  key ^= (key >> 9) ^ (key >> 18) ^ (key >> 27);
  key *= 4042322161U;
  key ^= (key >> 22);
  key *= 16773121U;
  return key;
}

/*
 * Fowler / Noll / Vo (FNV) Hash
 * http://www.isthe.com/chongo/tech/comp/fnv/
 */

const uint32_t FNV_32_HASH_START = 2166136261UL;
const uint64_t FNV_64_HASH_START = 14695981039346656037ULL;

inline uint32_t fnv32(const char* s,
                      uint32_t hash = FNV_32_HASH_START) {
  for (; *s; ++s) {
    hash += (hash << 1) + (hash << 4) + (hash << 7) +
            (hash << 8) + (hash << 24);
    hash ^= *s;
  }
  return hash;
}

inline uint32_t fnv32_buf(const void* buf,
                          size_t n,
                          uint32_t hash = FNV_32_HASH_START) {
  // forcing signed char, since other platforms can use unsigned
  const signed char* char_buf = reinterpret_cast<const signed char*>(buf);

  for (size_t i = 0; i < n; ++i) {
    hash += (hash << 1) + (hash << 4) + (hash << 7) +
            (hash << 8) + (hash << 24);
    hash ^= char_buf[i];
  }

  return hash;
}

inline uint32_t fnv32(const std::string& str,
                      uint32_t hash = FNV_32_HASH_START) {
  return fnv32_buf(str.data(), str.size(), hash);
}

inline uint64_t fnv64(const char* s,
                      uint64_t hash = FNV_64_HASH_START) {
  for (; *s; ++s) {
    hash += (hash << 1) + (hash << 4) + (hash << 5) + (hash << 7) +
            (hash << 8) + (hash << 40);
    hash ^= *s;
  }
  return hash;
}

inline uint64_t fnv64_buf(const void* buf,
                          size_t n,
                          uint64_t hash = FNV_64_HASH_START) {
  // forcing signed char, since other platforms can use unsigned
  const signed char* char_buf = reinterpret_cast<const signed char*>(buf);

  for (size_t i = 0; i < n; ++i) {
    hash += (hash << 1) + (hash << 4) + (hash << 5) + (hash << 7) +
            (hash << 8) + (hash << 40);
    hash ^= char_buf[i];
  }
  return hash;
}

inline uint64_t fnv64(const std::string& str,
                      uint64_t hash = FNV_64_HASH_START) {
  return fnv64_buf(str.data(), str.size(), hash);
}

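The hash parameter doubles as a continuation seed, so FNV can be applied incrementally across buffers; a brief sketch (illustrative strings):

    uint64_t h = folly::hash::fnv64("hello");
    // Feeding the previous result back in hashes the concatenation
    // "hello, world" without building the combined string.
    h = folly::hash::fnv64(", world", h);
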
/*
 * Paul Hsieh: http://www.azillionmonkeys.com/qed/hash.html
 */

#define get16bits(d) folly::loadUnaligned<uint16_t>(d)

inline uint32_t hsieh_hash32_buf(const void* buf, size_t len) {
  // forcing signed char, since other platforms can use unsigned
  const unsigned char* s = reinterpret_cast<const unsigned char*>(buf);
  uint32_t hash = static_cast<uint32_t>(len);
  uint32_t tmp;
  size_t rem;

  if (len <= 0 || buf == 0) {
    return 0;
  }

  rem = len & 3;
  len >>= 2;

  /* Main loop */
  for (; len > 0; len--) {
    hash += get16bits(s);
    tmp = (get16bits(s + 2) << 11) ^ hash;
    hash = (hash << 16) ^ tmp;
    s += 2 * sizeof(uint16_t);
    hash += hash >> 11;
  }

  /* Handle end cases */
  switch (rem) {
    case 3:
      hash += get16bits(s);
      hash ^= hash << 16;
      hash ^= s[sizeof(uint16_t)] << 18;
      hash += hash >> 11;
      break;
    case 2:
      hash += get16bits(s);
      hash ^= hash << 11;
      hash += hash >> 17;
      break;
    case 1:
      hash += *s;
      hash ^= hash << 10;
      hash += hash >> 1;
  }

  /* Force "avalanching" of final 127 bits */
  hash ^= hash << 3;
  hash += hash >> 5;
  hash ^= hash << 4;
  hash += hash >> 17;
  hash ^= hash << 25;
  hash += hash >> 6;

  return hash;
};

#undef get16bits

inline uint32_t hsieh_hash32(const char* s) {
  return hsieh_hash32_buf(s, std::strlen(s));
}

inline uint32_t hsieh_hash32_str(const std::string& str) {
  return hsieh_hash32_buf(str.data(), str.size());
}

//////////////////////////////////////////////////////////////////////

} // namespace hash

template <class Key, class Enable = void>
struct hasher;

struct Hash {
  template <class T>
  size_t operator()(const T& v) const {
    return hasher<T>()(v);
  }

  template <class T, class... Ts>
  size_t operator()(const T& t, const Ts&... ts) const {
    return hash::hash_128_to_64((*this)(t), (*this)(ts...));
  }
};

template <> struct hasher<int32_t> {
  size_t operator()(int32_t key) const {
    return hash::jenkins_rev_mix32(uint32_t(key));
  }
};

template <> struct hasher<uint32_t> {
  size_t operator()(uint32_t key) const {
    return hash::jenkins_rev_mix32(key);
  }
};

template <> struct hasher<int64_t> {
  size_t operator()(int64_t key) const {
    return static_cast<size_t>(hash::twang_mix64(uint64_t(key)));
  }
};

template <> struct hasher<uint64_t> {
  size_t operator()(uint64_t key) const {
    return static_cast<size_t>(hash::twang_mix64(key));
  }
};

template <> struct hasher<std::string> {
  size_t operator()(const std::string& key) const {
    return static_cast<size_t>(
        hash::SpookyHashV2::Hash64(key.data(), key.size(), 0));
  }
};

template <class T>
struct hasher<T, typename std::enable_if<std::is_enum<T>::value, void>::type> {
  size_t operator()(T key) const {
    return Hash()(static_cast<typename std::underlying_type<T>::type>(key));
  }
};

template <class T1, class T2>
struct hasher<std::pair<T1, T2>> {
  size_t operator()(const std::pair<T1, T2>& key) const {
    return Hash()(key.first, key.second);
  }
};

template <typename... Ts>
struct hasher<std::tuple<Ts...>> {
  size_t operator()(const std::tuple<Ts...>& key) const {
    return applyTuple(Hash(), key);
  }
};

// recursion
template <size_t index, typename... Ts>
struct TupleHasher {
  size_t operator()(std::tuple<Ts...> const& key) const {
    return hash::hash_combine(
        TupleHasher<index - 1, Ts...>()(key),
        std::get<index>(key));
  }
};

// base
template <typename... Ts>
struct TupleHasher<0, Ts...> {
  size_t operator()(std::tuple<Ts...> const& key) const {
    // we could do std::hash here directly, but hash_combine hides all the
    // ugly templating implicitly
    return hash::hash_combine(std::get<0>(key));
  }
};

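A usage sketch for the hasher/Hash machinery above (illustrative): folly::Hash dispatches to the right hasher specialization per type and folds multiple arguments into one value.

    folly::Hash h;
    size_t h1 = h(uint64_t(42));          // via hasher<uint64_t> (twang_mix64)
    size_t h2 = h(std::string("key"), 7); // variadic: combined into one hash
    size_t h3 = folly::hasher<std::pair<int, int>>()({1, 2});
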
} // namespace folly

// Custom hash functions.
namespace std {
// Hash function for pairs. Requires default hash functions for both
// items in the pair.
template <typename T1, typename T2>
struct hash<std::pair<T1, T2>> {
 public:
  size_t operator()(const std::pair<T1, T2>& x) const {
    return folly::hash::hash_combine(x.first, x.second);
  }
};

// Hash function for tuples. Requires default hash functions for all types.
template <typename... Ts>
struct hash<std::tuple<Ts...>> {
  size_t operator()(std::tuple<Ts...> const& key) const {
    folly::TupleHasher<
        std::tuple_size<std::tuple<Ts...>>::value - 1, // start index
        Ts...>
        hasher;

    return hasher(key);
  }
};
} // namespace std
// shims:
#include <folly/hash/Hash.h>
157 ios/Pods/Folly/folly/IPAddress.h generated
@@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2014-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -22,10 +22,10 @@
#include <string>
#include <utility> // std::pair

#include <folly/Range.h>
#include <folly/IPAddressException.h>
#include <folly/IPAddressV4.h>
#include <folly/IPAddressV6.h>
#include <folly/Range.h>
#include <folly/detail/IPAddress.h>

namespace folly {

@@ -65,9 +65,15 @@ typedef std::pair<IPAddress, uint8_t> CIDRNetwork;
 * @encode
 */
class IPAddress {
 private:
  template <typename F>
  auto pick(F f) const {
    return isV4() ? f(asV4()) : f(asV6());
  }

 public:
  // returns true iff the input string can be parsed as an ip-address
  static bool validate(StringPiece ip);
  static bool validate(StringPiece ip) noexcept;

  // return the V4 representation of the address, converting it from V6 to V4 if
  // needed. Note that this will throw an IPAddressFormatException if the V6
@@ -85,11 +91,26 @@ class IPAddress {
   * is -1, will use /32 for IPv4 and /128 for IPv6)
   * @param [in] mask apply mask on the address or not,
   *             e.g. 192.168.13.46/24 => 192.168.13.0/24
   * @return either pair with IPAddress network and uint8_t mask or
   *         CIDRNetworkError
   */
  static Expected<CIDRNetwork, CIDRNetworkError> tryCreateNetwork(
      StringPiece ipSlashCidr,
      int defaultCidr = -1,
      bool mask = true);

  /**
   * Create a network and mask from a CIDR formatted address string.
   * Same as tryCreateNetwork() but throws IPAddressFormatException on error.
   * The implementation calls tryCreateNetwork(...) underneath
   *
   * @throws IPAddressFormatException if invalid address
   * @return pair with IPAddress network and uint8_t mask
   */
  static CIDRNetwork createNetwork(
      StringPiece ipSlashCidr, int defaultCidr = -1, bool mask = true);
      StringPiece ipSlashCidr,
      int defaultCidr = -1,
      bool mask = true);

  /**
   * Return a string representation of a CIDR block created with createNetwork.
@@ -106,6 +127,20 @@ class IPAddress {
   */
  static IPAddress fromBinary(ByteRange bytes);

  /**
   * Non-throwing version of fromBinary().
   * On failure returns IPAddressFormatError.
   */
  static Expected<IPAddress, IPAddressFormatError> tryFromBinary(
      ByteRange bytes) noexcept;

  /**
   * Tries to create a new IPAddress instance from provided string and
   * returns it on success. Returns IPAddressFormatError on failure.
   */
  static Expected<IPAddress, IPAddressFormatError> tryFromString(
      StringPiece str) noexcept;

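A short sketch contrasting the throwing and Expected-based entry points (illustrative addresses):

    // Throwing API: invalid input raises IPAddressFormatException.
    auto net = folly::IPAddress::createNetwork("192.168.1.0/24");

    // Non-throwing API: inspect the Expected instead of catching.
    auto maybe = folly::IPAddress::tryFromString("256.1.1.1");
    if (maybe.hasValue()) {
      folly::IPAddress addr = maybe.value();
    } // otherwise maybe.error() is an IPAddressFormatError
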
  /**
   * Create an IPAddress from a 32bit long (network byte order).
   * @throws IPAddressFormatException
@@ -116,8 +151,9 @@ class IPAddress {

  // Given 2 IPAddress,mask pairs extract the longest common IPAddress,
  // mask pair
  static CIDRNetwork longestCommonPrefix(const CIDRNetwork& one,
                                         const CIDRNetwork& two);
  static CIDRNetwork longestCommonPrefix(
      const CIDRNetwork& one,
      const CIDRNetwork& two);

  /**
   * Constructs an uninitialized IPAddress.
@@ -143,25 +179,25 @@ class IPAddress {
  explicit IPAddress(const sockaddr* addr);

  // Create an IPAddress from a V4 address
  /* implicit */ IPAddress(const IPAddressV4 ipV4Addr);
  /* implicit */ IPAddress(const in_addr addr);
  /* implicit */ IPAddress(const IPAddressV4 ipV4Addr) noexcept;
  /* implicit */ IPAddress(const in_addr addr) noexcept;

  // Create an IPAddress from a V6 address
  /* implicit */ IPAddress(const IPAddressV6& ipV6Addr);
  /* implicit */ IPAddress(const in6_addr& addr);
  /* implicit */ IPAddress(const IPAddressV6& ipV6Addr) noexcept;
  /* implicit */ IPAddress(const in6_addr& addr) noexcept;

  // Assign from V4 address
  IPAddress& operator=(const IPAddressV4& ipV4Addr);
  IPAddress& operator=(const IPAddressV4& ipV4Addr) noexcept;

  // Assign from V6 address
  IPAddress& operator=(const IPAddressV6& ipV6Addr);
  IPAddress& operator=(const IPAddressV6& ipV6Addr) noexcept;

  /**
   * Converts an IPAddress to an IPAddressV4 instance.
   * @note This is not some handy convenience wrapper to convert an IPv4 address
   *       to a mapped IPv6 address. If you want that use
   *       IPAddress::createIPv6(addr)
   * @throws IPAddressFormatException if not a V4 instance
   * @throws InvalidAddressFamilyException if not a V4 instance
   */
  const IPAddressV4& asV4() const {
    if (UNLIKELY(!isV4())) {
@@ -182,10 +218,12 @@ class IPAddress {
  }

  // Return sa_family_t of IPAddress
  sa_family_t family() const { return family_; }
  sa_family_t family() const {
    return family_;
  }

  // Populate sockaddr_storage with an appropriate value
  int toSockaddrStorage(sockaddr_storage *dest, uint16_t port = 0) const {
  int toSockaddrStorage(sockaddr_storage* dest, uint16_t port = 0) const {
    if (dest == nullptr) {
      throw IPAddressFormatException("dest must not be null");
    }
@@ -193,7 +231,7 @@ class IPAddress {
    dest->ss_family = family();

    if (isV4()) {
      sockaddr_in *sin = reinterpret_cast<sockaddr_in*>(dest);
      sockaddr_in* sin = reinterpret_cast<sockaddr_in*>(dest);
      sin->sin_addr = asV4().toAddr();
      sin->sin_port = port;
#if defined(__APPLE__)
@@ -201,7 +239,7 @@ class IPAddress {
#endif
      return sizeof(*sin);
    } else if (isV6()) {
      sockaddr_in6 *sin = reinterpret_cast<sockaddr_in6*>(dest);
      sockaddr_in6* sin = reinterpret_cast<sockaddr_in6*>(dest);
      sin->sin6_addr = asV6().toAddr();
      sin->sin6_port = port;
      sin->sin6_scope_id = asV6().getScopeId();
@@ -254,43 +292,49 @@ class IPAddress {
  }

  // @return true if address is uninitialized
  bool empty() const { return (family_ == AF_UNSPEC); }
  bool empty() const {
    return family_ == AF_UNSPEC;
  }

  // @return true if address is initialized
  explicit operator bool() const { return !empty(); }
  explicit operator bool() const {
    return !empty();
  }

  // @return true if this is an IPAddressV4 instance
  bool isV4() const { return (family_ == AF_INET); }
  bool isV4() const {
    return family_ == AF_INET;
  }

  // @return true if this is an IPAddressV6 instance
  bool isV6() const { return (family_ == AF_INET6); }
  bool isV6() const {
    return family_ == AF_INET6;
  }

  // @return true if this address is all zeros
  bool isZero() const {
    return isV4() ? asV4().isZero()
                  : asV6().isZero();
    return pick([&](auto& _) { return _.isZero(); });
  }

  // Number of bits in the address representation.
  size_t bitCount() const {
    return isV4() ? IPAddressV4::bitCount()
                  : IPAddressV6::bitCount();
    return pick([&](auto& _) { return _.bitCount(); });
  }
  // Number of bytes in the address representation.
  size_t byteCount() const {
    return bitCount() / 8;
  }
  // get nth most significant bit - 0 indexed
  bool getNthMSBit(size_t bitIndex) const {
    return detail::getNthMSBitImpl(*this, bitIndex, family());
  }
  // get nth most significant byte - 0 indexed
  uint8_t getNthMSByte(size_t byteIndex) const;
  // get nth bit - 0 indexed
  bool getNthLSBit(size_t bitIndex) const {
    return getNthMSBit(bitCount() - bitIndex - 1);
  }
  // get nth byte - 0 indexed
  uint8_t getNthLSByte(size_t byteIndex) const {
    return getNthMSByte(byteCount() - byteIndex - 1);
  }
@@ -302,32 +346,27 @@ class IPAddress {
   * {family:'AF_INET|AF_INET6', addr:'address', hash:long}.
   */
  std::string toJson() const {
    return isV4() ? asV4().toJson()
                  : asV6().toJson();
    return pick([&](auto& _) { return _.toJson(); });
  }

  // Hash of address
  std::size_t hash() const {
    return isV4() ? asV4().hash()
                  : asV6().hash();
    return pick([&](auto& _) { return _.hash(); });
  }

  // Return true if the address qualifies as localhost.
  bool isLoopback() const {
    return isV4() ? asV4().isLoopback()
                  : asV6().isLoopback();
    return pick([&](auto& _) { return _.isLoopback(); });
  }

  // Return true if the address qualifies as link local
  bool isLinkLocal() const {
    return isV4() ? asV4().isLinkLocal()
                  : asV6().isLinkLocal();
    return pick([&](auto& _) { return _.isLinkLocal(); });
  }

  // Return true if the address qualifies as broadcast.
  bool isLinkLocalBroadcast() const {
    return isV4() ? asV4().isLinkLocalBroadcast()
                  : asV6().isLinkLocalBroadcast();
    return pick([&](auto& _) { return _.isLinkLocalBroadcast(); });
  }

  /**
@@ -337,8 +376,7 @@ class IPAddress {
   * 2000::/3, ffxe::/16.
   */
  bool isNonroutable() const {
    return isV4() ? asV4().isNonroutable()
                  : asV6().isNonroutable();
    return pick([&](auto& _) { return _.isNonroutable(); });
  }

  /**
@@ -346,14 +384,12 @@ class IPAddress {
   * (for example, 192.168.xxx.xxx or fc00::/7 addresses)
   */
  bool isPrivate() const {
    return isV4() ? asV4().isPrivate()
                  : asV6().isPrivate();
    return pick([&](auto& _) { return _.isPrivate(); });
  }

  // Return true if the address is a multicast address.
  bool isMulticast() const {
    return isV4() ? asV4().isMulticast()
                  : asV6().isMulticast();
    return pick([&](auto& _) { return _.isMulticast(); });
  }

  /**
@@ -363,8 +399,7 @@ class IPAddress {
   * @return IPAddress instance with bits set to 0
   */
  IPAddress mask(uint8_t numBits) const {
    return isV4() ? IPAddress(asV4().mask(numBits))
                  : IPAddress(asV6().mask(numBits));
    return pick([&](auto& _) { return IPAddress(_.mask(numBits)); });
  }

  /**
@@ -373,8 +408,7 @@ class IPAddress {
   * @throws IPAddressFormatException on inet_ntop error
   */
  std::string str() const {
    return isV4() ? asV4().str()
                  : asV6().str();
    return pick([&](auto& _) { return _.str(); });
  }

  /**
@@ -383,21 +417,24 @@ class IPAddress {
   * this is the hex representation with : characters inserted every 4 digits.
   */
  std::string toFullyQualified() const {
    return isV4() ? asV4().toFullyQualified()
                  : asV6().toFullyQualified();
    return pick([&](auto& _) { return _.toFullyQualified(); });
  }

  /// Same as toFullyQualified but append to an output string.
  void toFullyQualifiedAppend(std::string& out) const {
    return pick([&](auto& _) { return _.toFullyQualifiedAppend(out); });
  }

  // Address version (4 or 6)
  uint8_t version() const {
    return isV4() ? asV4().version()
                  : asV6().version();
    return pick([&](auto& _) { return _.version(); });
  }

  /**
   * Access to address bytes, in network byte order.
   */
  const unsigned char* bytes() const {
    return isV4() ? asV4().bytes() : asV6().bytes();
    return pick([&](auto& _) { return _.bytes(); });
  }

 private:
@@ -408,11 +445,11 @@ class IPAddress {
    IPAddressV4 ipV4Addr;
    IPAddressV6 ipV6Addr;
    // default constructor
    IPAddressV46() {
    IPAddressV46() noexcept {
      std::memset(this, 0, sizeof(IPAddressV46));
    }
    explicit IPAddressV46(const IPAddressV4& addr): ipV4Addr(addr) {}
    explicit IPAddressV46(const IPAddressV6& addr): ipV6Addr(addr) {}
    explicit IPAddressV46(const IPAddressV4& addr) noexcept : ipV4Addr(addr) {}
    explicit IPAddressV46(const IPAddressV6& addr) noexcept : ipV6Addr(addr) {}
  } IPAddressV46;
  IPAddressV46 addr_;
  sa_family_t family_;
@@ -451,13 +488,13 @@ inline bool operator>=(const IPAddress& a, const IPAddress& b) {
  return !(a < b);
}

} // folly
} // namespace folly

namespace std {
template<>
template <>
struct hash<folly::IPAddress> {
  size_t operator()(const folly::IPAddress& addr) const {
    return addr.hash();
  }
};
} // std
} // namespace std
31 ios/Pods/Folly/folly/IPAddressException.h generated
@@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2014-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -13,21 +13,37 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <exception>
#include <string>
#include <utility>

#include <folly/CPortability.h>
#include <folly/detail/IPAddress.h>

namespace folly {

/**
 * Error codes for non-throwing interface of IPAddress family of functions.
 */
enum class IPAddressFormatError { INVALID_IP, UNSUPPORTED_ADDR_FAMILY };

/**
 * Wraps error from parsing IP/MASK string
 */
enum class CIDRNetworkError {
  INVALID_DEFAULT_CIDR,
  INVALID_IP_SLASH_CIDR,
  INVALID_IP,
  INVALID_CIDR,
  CIDR_MISMATCH,
};

/**
 * Exception for invalid IP addresses.
 */
class IPAddressFormatException : public std::exception {
class FOLLY_EXPORT IPAddressFormatException : public std::exception {
 public:
  explicit IPAddressFormatException(std::string msg) noexcept
      : msg_(std::move(msg)) {}
@@ -37,8 +53,8 @@ class IPAddressFormatException : public std::exception {
      default;
  IPAddressFormatException& operator=(IPAddressFormatException&&) = default;

  virtual ~IPAddressFormatException() noexcept {}
  virtual const char *what(void) const noexcept {
  ~IPAddressFormatException() noexcept override {}
  const char* what() const noexcept override {
    return msg_.c_str();
  }

@@ -46,7 +62,8 @@ class IPAddressFormatException : public std::exception {
  std::string msg_;
};

class InvalidAddressFamilyException : public IPAddressFormatException {
class FOLLY_EXPORT InvalidAddressFamilyException
    : public IPAddressFormatException {
 public:
  explicit InvalidAddressFamilyException(std::string msg) noexcept
      : IPAddressFormatException(std::move(msg)) {}
@@ -62,4 +79,4 @@ class InvalidAddressFamilyException : public IPAddressFormatException {
      default;
};

} // folly
} // namespace folly
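The enums back the non-throwing tryFrom* APIs while the exception types back the throwing ones; a brief handling sketch (illustrative input and logging):

    try {
      folly::IPAddress addr("not-an-ip"); // throws
    } catch (const folly::IPAddressFormatException& ex) {
      LOG(ERROR) << "bad address: " << ex.what();
    }
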
105 ios/Pods/Folly/folly/IPAddressV4.h generated
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2016 Facebook, Inc.
|
||||
* Copyright 2014-present Facebook, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@ -22,9 +22,12 @@
|
||||
#include <functional>
|
||||
#include <iosfwd>
|
||||
|
||||
#include <folly/Hash.h>
|
||||
#include <folly/Expected.h>
|
||||
#include <folly/FBString.h>
|
||||
#include <folly/IPAddressException.h>
|
||||
#include <folly/Range.h>
|
||||
#include <folly/detail/IPAddress.h>
|
||||
#include <folly/hash/Hash.h>
|
||||
|
||||
namespace folly {
|
||||
|
||||
@ -54,8 +57,12 @@ typedef std::array<uint8_t, 4> ByteArray4;
|
||||
*/
|
||||
class IPAddressV4 {
|
||||
public:
|
||||
// Max size of std::string returned by toFullyQualified.
|
||||
static constexpr size_t kMaxToFullyQualifiedSize =
|
||||
4 /*words*/ * 3 /*max chars per word*/ + 3 /*separators*/;
|
||||
|
||||
// returns true iff the input string can be parsed as an ipv4-address
|
||||
static bool validate(StringPiece ip);
|
||||
static bool validate(StringPiece ip) noexcept;
|
||||
|
||||
// create an IPAddressV4 instance from a uint32_t (network byte order)
|
||||
static IPAddressV4 fromLong(uint32_t src);
|
||||
@ -66,19 +73,36 @@ class IPAddressV4 {
|
||||
* Create a new IPAddress instance from the provided binary data.
|
||||
* @throws IPAddressFormatException if the input length is not 4 bytes.
|
||||
*/
|
||||
static IPAddressV4 fromBinary(ByteRange bytes) {
|
||||
IPAddressV4 addr;
|
||||
addr.setFromBinary(bytes);
|
||||
return addr;
|
||||
}
|
||||
static IPAddressV4 fromBinary(ByteRange bytes);
|
||||
|
||||
/**
|
||||
* Non-throwing version of fromBinary().
|
||||
* On failure returns IPAddressFormatError.
|
||||
*/
|
||||
static Expected<IPAddressV4, IPAddressFormatError> tryFromBinary(
|
||||
ByteRange bytes) noexcept;
|
||||
|
||||
/**
|
||||
* Tries to create a new IPAddressV4 instance from provided string and
|
||||
* returns it on success. Returns IPAddressFormatError on failure.
|
||||
*/
|
||||
static Expected<IPAddressV4, IPAddressFormatError> tryFromString(
|
||||
StringPiece str) noexcept;
|
||||
|
||||
/**
|
||||
* Returns the address as a Range.
|
||||
*/
|
||||
ByteRange toBinary() const {
|
||||
return ByteRange((const unsigned char *) &addr_.inAddr_.s_addr, 4);
|
||||
return ByteRange((const unsigned char*)&addr_.inAddr_.s_addr, 4);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new IPAddress instance from the in-addr.arpa representation.
|
||||
* @throws IPAddressFormatException if the input is not a valid in-addr.arpa
|
||||
* representation
|
||||
*/
|
||||
static IPAddressV4 fromInverseArpaName(const std::string& arpaname);
|
||||
|
||||
/**
|
||||
* Convert a IPv4 address string to a long in network byte order.
|
||||
* @param [in] ip the address to convert
|
||||
@ -101,10 +125,10 @@ class IPAddressV4 {
|
||||
explicit IPAddressV4(StringPiece ip);
|
||||
|
||||
// ByteArray4 constructor
|
||||
explicit IPAddressV4(const ByteArray4& src);
|
||||
explicit IPAddressV4(const ByteArray4& src) noexcept;
|
||||
|
||||
// in_addr constructor
|
||||
explicit IPAddressV4(const in_addr src);
|
||||
explicit IPAddressV4(const in_addr src) noexcept;
|
||||
|
||||
// Return the V6 mapped representation of the address.
|
||||
IPAddressV6 createIPv6() const;
|
||||
@ -129,7 +153,9 @@ class IPAddressV4 {
|
||||
* @see IPAddress#bitCount
|
||||
* @returns 32
|
||||
*/
|
||||
static size_t bitCount() { return 32; }
|
||||
static constexpr size_t bitCount() {
|
||||
return 32;
|
||||
}
|
||||
|
||||
/**
|
||||
* @See IPAddress#toJson
|
||||
@ -183,8 +209,12 @@ class IPAddressV4 {
|
||||
// @see IPAddress#str
|
||||
std::string str() const;
|
||||
|
||||
std::string toInverseArpaName() const;
|
||||
|
||||
// return underlying in_addr structure
|
||||
in_addr toAddr() const { return addr_.inAddr_; }
|
||||
in_addr toAddr() const {
|
||||
return addr_.inAddr_;
|
||||
}
|
||||
|
||||
sockaddr_in toSockAddr() const {
|
||||
sockaddr_in addr;
|
||||
@ -201,10 +231,17 @@ class IPAddressV4 {
|
||||
}
|
||||
|
||||
// @see IPAddress#toFullyQualified
|
||||
std::string toFullyQualified() const { return str(); }
|
||||
std::string toFullyQualified() const {
|
||||
return str();
|
||||
}
|
||||
|
||||
// @see IPAddress#toFullyQualifiedAppend
|
||||
void toFullyQualifiedAppend(std::string& out) const;
|
||||
|
||||
// @see IPAddress#version
|
||||
size_t version() const { return 4; }
|
||||
uint8_t version() const {
|
||||
return 4;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the mask associated with the given number of bits.
|
||||
@ -222,44 +259,48 @@ class IPAddressV4 {
|
||||
const CIDRNetworkV4& one,
|
||||
const CIDRNetworkV4& two);
|
||||
// Number of bytes in the address representation.
|
||||
static size_t byteCount() { return 4; }
|
||||
//get nth most significant bit - 0 indexed
|
||||
static size_t byteCount() {
|
||||
return 4;
|
||||
}
|
||||
// get nth most significant bit - 0 indexed
|
||||
bool getNthMSBit(size_t bitIndex) const {
|
||||
return detail::getNthMSBitImpl(*this, bitIndex, AF_INET);
|
||||
}
|
||||
//get nth most significant byte - 0 indexed
|
||||
// get nth most significant byte - 0 indexed
|
||||
uint8_t getNthMSByte(size_t byteIndex) const;
|
||||
//get nth bit - 0 indexed
|
||||
// get nth bit - 0 indexed
|
||||
bool getNthLSBit(size_t bitIndex) const {
|
||||
return getNthMSBit(bitCount() - bitIndex - 1);
|
||||
}
|
||||
//get nth byte - 0 indexed
|
||||
// get nth byte - 0 indexed
|
||||
uint8_t getNthLSByte(size_t byteIndex) const {
|
||||
return getNthMSByte(byteCount() - byteIndex - 1);
|
||||
}
|
||||
|
||||
const unsigned char* bytes() const { return addr_.bytes_.data(); }
|
||||
const unsigned char* bytes() const {
|
||||
return addr_.bytes_.data();
|
||||
}
|
||||
|
||||
private:
|
||||
union AddressStorage {
|
||||
static_assert(sizeof(in_addr) == sizeof(ByteArray4),
|
||||
"size of in_addr and ByteArray4 are different");
|
||||
static_assert(
|
||||
sizeof(in_addr) == sizeof(ByteArray4),
|
||||
"size of in_addr and ByteArray4 are different");
|
||||
in_addr inAddr_;
|
||||
ByteArray4 bytes_;
|
||||
AddressStorage() {
|
||||
std::memset(this, 0, sizeof(AddressStorage));
|
||||
}
|
||||
explicit AddressStorage(const ByteArray4 bytes): bytes_(bytes) {}
|
||||
explicit AddressStorage(const in_addr addr): inAddr_(addr) {}
|
||||
explicit AddressStorage(const ByteArray4 bytes) : bytes_(bytes) {}
|
||||
explicit AddressStorage(const in_addr addr) : inAddr_(addr) {}
|
||||
} addr_;
|
||||
|
||||
static const std::array<ByteArray4, 33> masks_;
|
||||
|
||||
/**
|
||||
* Set the current IPAddressV4 object to have the address specified by bytes.
|
||||
* @throws IPAddressFormatException if bytes.size() is not 4.
|
||||
* Returns IPAddressFormatError if bytes.size() is not 4.
|
||||
*/
|
||||
void setFromBinary(ByteRange bytes);
|
||||
Expected<Unit, IPAddressFormatError> trySetFromBinary(
|
||||
ByteRange bytes) noexcept;
|
||||
};
|
||||
|
||||
// boost::hash uses hash_value() so this allows boost::hash to work
|
||||
@ -294,13 +335,13 @@ inline bool operator>=(const IPAddressV4& a, const IPAddressV4& b) {
|
||||
return !(a < b);
|
||||
}
|
||||
|
||||
} // folly
|
||||
} // namespace folly
|
||||
|
||||
namespace std {
|
||||
template<>
|
||||
template <>
|
||||
struct hash<folly::IPAddressV4> {
|
||||
size_t operator()(const folly::IPAddressV4 addr) const {
|
||||
return addr.hash();
|
||||
}
|
||||
};
|
||||
} // std
|
||||
} // namespace std
|
||||
|
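The substantive change in this header is the noexcept parse path: fromBinary() loses its inline throwing body and gains Expected-returning tryFromBinary()/tryFromString() siblings. A minimal sketch of the non-throwing flow, using only the signatures shown above (example code, not part of the commit):

#include <folly/IPAddressV4.h>

// Returns the parsed address, or the default 0.0.0.0 when the input is bad.
folly::IPAddressV4 parseOrDefault(folly::StringPiece s) {
  // Expected<IPAddressV4, IPAddressFormatError>; never throws.
  auto maybe = folly::IPAddressV4::tryFromString(s);
  return maybe.hasValue() ? maybe.value() : folly::IPAddressV4();
}
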
191
ios/Pods/Folly/folly/IPAddressV6.h
generated

@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2014-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -24,9 +24,13 @@
#include <map>
#include <stdexcept>

#include <folly/Hash.h>
#include <folly/Expected.h>
#include <folly/FBString.h>
#include <folly/IPAddressException.h>
#include <folly/Optional.h>
#include <folly/Range.h>
#include <folly/detail/IPAddress.h>
#include <folly/hash/Hash.h>

namespace folly {

@ -68,7 +72,9 @@ class IPAddressV6 {
 public:
  // V6 Address Type
  enum Type {
    TEREDO, T6TO4, NORMAL,
    TEREDO,
    T6TO4,
    NORMAL,
  };
  // A constructor parameter to indicate that we should create a link-local
  // IPAddressV6.
@ -85,26 +91,43 @@ class IPAddressV6 {

  // Size of std::string returned by toFullyQualified.
  static constexpr size_t kToFullyQualifiedSize =
    8 /*words*/ * 4 /*hex chars per word*/ + 7 /*separators*/;
      8 /*words*/ * 4 /*hex chars per word*/ + 7 /*separators*/;

  // returns true iff the input string can be parsed as an ipv6-address
  static bool validate(StringPiece ip);
  static bool validate(StringPiece ip) noexcept;

  /**
   * Create a new IPAddress instance from the provided binary data.
   * @throws IPAddressFormatException if the input length is not 16 bytes.
   */
  static IPAddressV6 fromBinary(ByteRange bytes) {
    IPAddressV6 addr;
    addr.setFromBinary(bytes);
    return addr;
  }
  static IPAddressV6 fromBinary(ByteRange bytes);

  /**
   * Non-throwing version of fromBinary().
   * On failure returns IPAddressFormatError.
   */
  static Expected<IPAddressV6, IPAddressFormatError> tryFromBinary(
      ByteRange bytes) noexcept;

  /**
   * Tries to create a new IPAddressV6 instance from provided string and
   * returns it on success. Returns IPAddressFormatError on failure.
   */
  static Expected<IPAddressV6, IPAddressFormatError> tryFromString(
      StringPiece str) noexcept;

  /**
   * Create a new IPAddress instance from the ip6.arpa representation.
   * @throws IPAddressFormatException if the input is not a valid ip6.arpa
   * representation
   */
  static IPAddressV6 fromInverseArpaName(const std::string& arpaname);

  /**
   * Returns the address as a Range.
   */
  ByteRange toBinary() const {
    return ByteRange((const unsigned char *) &addr_.in6Addr_.s6_addr, 16);
    return ByteRange((const unsigned char*)&addr_.in6Addr_.s6_addr, 16);
  }

  /**
@ -120,13 +143,13 @@ class IPAddressV6 {
  explicit IPAddressV6(StringPiece ip);

  // ByteArray16 constructor
  explicit IPAddressV6(const ByteArray16& src);
  explicit IPAddressV6(const ByteArray16& src) noexcept;

  // in6_addr constructor
  explicit IPAddressV6(const in6_addr& src);
  explicit IPAddressV6(const in6_addr& src) noexcept;

  // sockaddr_in6 constructor
  explicit IPAddressV6(const sockaddr_in6& src);
  explicit IPAddressV6(const sockaddr_in6& src) noexcept;

  /**
   * Create a link-local IPAddressV6 from the specified ethernet MAC address.
@ -163,7 +186,9 @@ class IPAddressV6 {
   * @see IPAddress#bitCount
   * @returns 128
   */
  static size_t bitCount() { return 128; }
  static constexpr size_t bitCount() {
    return 128;
  }

  /**
   * @see IPAddress#toJson
@ -180,8 +205,8 @@ class IPAddressV6 {
  bool inSubnet(const IPAddressV6& subnet, uint8_t cidr) const {
    return inSubnetWithMask(subnet, fetchMask(cidr));
  }
  bool inSubnetWithMask(const IPAddressV6& subnet,
                        const ByteArray16& mask) const;
  bool inSubnetWithMask(const IPAddressV6& subnet, const ByteArray16& mask)
      const;

  // @see IPAddress#isLoopback
  bool isLoopback() const;
@ -209,6 +234,26 @@ class IPAddressV6 {
   */
  bool isLinkLocal() const;

  /**
   * Return the mac address if this is a link-local IPv6 address.
   *
   * @return an Optional<MacAddress> union representing the mac address.
   *
   * If the address is not a link-local one it will return an empty Optional.
   * You can use Optional::value() to check whether the mac address is not null.
   */
  Optional<MacAddress> getMacAddressFromLinkLocal() const;

  /**
   * Return the mac address if this is an auto-configured IPv6 address based on
   * EUI-64
   *
   * @return an Optional<MacAddress> union representing the mac address.
   * If the address is not based on EUI-64 it will return an empty Optional.
   * You can use Optional::value() to check whether the mac address is not null.
   */
  Optional<MacAddress> getMacAddressFromEUI64() const;

  /**
   * Return true if this is a multicast address.
   */
@ -238,9 +283,13 @@ class IPAddressV6 {
  IPAddressV6 mask(size_t numBits) const;

  // return underlying in6_addr structure
  in6_addr toAddr() const { return addr_.in6Addr_; }
  in6_addr toAddr() const {
    return addr_.in6Addr_;
  }

  uint16_t getScopeId() const { return scope_; }
  uint16_t getScopeId() const {
    return scope_;
  }
  void setScopeId(uint16_t scope) {
    scope_ = scope;
  }
@ -263,11 +312,18 @@ class IPAddressV6 {
  // @see IPAddress#toFullyQualified
  std::string toFullyQualified() const;

  // @see IPAddress#toFullyQualifiedAppend
  void toFullyQualifiedAppend(std::string& out) const;

  std::string toInverseArpaName() const;

  // @see IPAddress#str
  std::string str() const;

  // @see IPAddress#version
  size_t version() const { return 6; }
  uint8_t version() const {
    return 6;
  }

  /**
   * Return the solicited-node multicast address for this address.
@ -289,31 +345,60 @@ class IPAddressV6 {
      const CIDRNetworkV6& one,
      const CIDRNetworkV6& two);
  // Number of bytes in the address representation.
  static constexpr size_t byteCount() { return 16; }
  static constexpr size_t byteCount() {
    return 16;
  }

  //get nth most significant bit - 0 indexed
  // get nth most significant bit - 0 indexed
  bool getNthMSBit(size_t bitIndex) const {
    return detail::getNthMSBitImpl(*this, bitIndex, AF_INET6);
  }
  //get nth most significant byte - 0 indexed
  // get nth most significant byte - 0 indexed
  uint8_t getNthMSByte(size_t byteIndex) const;
  //get nth bit - 0 indexed
  // get nth bit - 0 indexed
  bool getNthLSBit(size_t bitIndex) const {
    return getNthMSBit(bitCount() - bitIndex - 1);
  }
  //get nth byte - 0 indexed
  // get nth byte - 0 indexed
  uint8_t getNthLSByte(size_t byteIndex) const {
    return getNthMSByte(byteCount() - byteIndex - 1);
  }

  const unsigned char* bytes() const { return addr_.in6Addr_.s6_addr; }
 protected:
  const unsigned char* bytes() const {
    return addr_.in6Addr_.s6_addr;
  }

 protected:
  /**
   * Helper that returns true if the address is in the binary subnet specified
   * by addr.
   */
  bool inBinarySubnet(const std::array<uint8_t, 2> addr,
                      size_t numBits) const;
  bool inBinarySubnet(const std::array<uint8_t, 2> addr, size_t numBits) const;

 private:
  auto tie() const {
    return std::tie(addr_.bytes_, scope_);
  }

 public:
  friend inline bool operator==(const IPAddressV6& a, const IPAddressV6& b) {
    return a.tie() == b.tie();
  }
  friend inline bool operator!=(const IPAddressV6& a, const IPAddressV6& b) {
    return a.tie() != b.tie();
  }
  friend inline bool operator<(const IPAddressV6& a, const IPAddressV6& b) {
    return a.tie() < b.tie();
  }
  friend inline bool operator>(const IPAddressV6& a, const IPAddressV6& b) {
    return a.tie() > b.tie();
  }
  friend inline bool operator<=(const IPAddressV6& a, const IPAddressV6& b) {
    return a.tie() <= b.tie();
  }
  friend inline bool operator>=(const IPAddressV6& a, const IPAddressV6& b) {
    return a.tie() >= b.tie();
  }

 private:
  union AddressStorage {
@ -322,8 +407,8 @@ class IPAddressV6 {
    AddressStorage() {
      std::memset(this, 0, sizeof(AddressStorage));
    }
    explicit AddressStorage(const ByteArray16& bytes): bytes_(bytes) {}
    explicit AddressStorage(const in6_addr& addr): in6Addr_(addr) {}
    explicit AddressStorage(const ByteArray16& bytes) : bytes_(bytes) {}
    explicit AddressStorage(const in6_addr& addr) : in6Addr_(addr) {}
    explicit AddressStorage(MacAddress mac);
  } addr_;

@ -331,13 +416,12 @@ class IPAddressV6 {
  // are *not* link-local.
  uint16_t scope_{0};

  static const std::array<ByteArray16, 129> masks_;

  /**
   * Set the current IPAddressV6 object to have the address specified by bytes.
   * @throws IPAddressFormatException if bytes.size() is not 16.
   * Returns IPAddressFormatError if bytes.size() is not 16.
   */
  void setFromBinary(ByteRange bytes);
  Expected<Unit, IPAddressFormatError> trySetFromBinary(
      ByteRange bytes) noexcept;
};

// boost::hash uses hash_value() so this allows boost::hash to work
@ -348,44 +432,13 @@ std::ostream& operator<<(std::ostream& os, const IPAddressV6& addr);
void toAppend(IPAddressV6 addr, std::string* result);
void toAppend(IPAddressV6 addr, fbstring* result);

/**
 * Return true if two addresses are equal.
 */
inline bool operator==(const IPAddressV6& addr1, const IPAddressV6& addr2) {
  return (std::memcmp(addr1.toAddr().s6_addr, addr2.toAddr().s6_addr, 16) == 0)
      && addr1.getScopeId() == addr2.getScopeId();
}
// Return true if addr1 < addr2
inline bool operator<(const IPAddressV6& addr1, const IPAddressV6& addr2) {
  auto cmp = std::memcmp(addr1.toAddr().s6_addr,
                         addr2.toAddr().s6_addr, 16) < 0;
  if (!cmp) {
    return addr1.getScopeId() < addr2.getScopeId();
  } else {
    return cmp;
  }
}
// Derived operators
inline bool operator!=(const IPAddressV6& a, const IPAddressV6& b) {
  return !(a == b);
}
inline bool operator>(const IPAddressV6& a, const IPAddressV6& b) {
  return b < a;
}
inline bool operator<=(const IPAddressV6& a, const IPAddressV6& b) {
  return !(a > b);
}
inline bool operator>=(const IPAddressV6& a, const IPAddressV6& b) {
  return !(a < b);
}

} // folly
} // namespace folly

namespace std {
template<>
template <>
struct hash<folly::IPAddressV6> {
  size_t operator()(const folly::IPAddressV6& addr) const {
    return addr.hash();
  }
};
} // std
} // namespace std

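Two behavioral points are easy to miss in these hunks: the free operators that memcmp'd s6_addr are removed in favor of hidden-friend comparisons over tie() (address bytes plus scope id), and version() now returns uint8_t. The observable contract matches what the deleted operator== spelled out; a small sketch (example code, not part of the commit):

#include <cassert>
#include <folly/IPAddressV6.h>

void scopeIdParticipatesInEquality() {
  folly::IPAddressV6 a("fe80::1");
  folly::IPAddressV6 b("fe80::1");
  assert(a == b); // same bytes, same default scope id

  b.setScopeId(4); // e.g. an interface index
  assert(a != b);  // tie() compares (bytes_, scope_), so scope id matters
}
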
118
ios/Pods/Folly/folly/Indestructible.h
generated

@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2016-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -16,10 +16,11 @@

#pragma once

#include <cassert>
#include <type_traits>
#include <utility>
#include <glog/logging.h>
#include <folly/Likely.h>
#include <folly/Portability.h>

#include <folly/Traits.h>

namespace folly {

@ -58,12 +59,61 @@ namespace folly {

template <typename T>
class Indestructible final {

 public:
  template <typename... Args>
  template <typename S = T, typename = decltype(S())>
  constexpr Indestructible() noexcept(noexcept(T())) {}

  /**
   * Constructor accepting a single argument by forwarding reference, this
   * allows using list initialzation without the overhead of things like
   * in_place, etc and also works with std::initializer_list constructors
   * which can't be deduced, the default parameter helps there.
   *
   *   auto i = folly::Indestructible<std::map<int, int>>{{{1, 2}}};
   *
   * This provides convenience
   *
   * There are two versions of this constructor - one for when the element is
   * implicitly constructible from the given argument and one for when the
   * type is explicitly but not implicitly constructible from the given
   * argument.
   */
  template <
      typename U = T,
      _t<std::enable_if<std::is_constructible<T, U&&>::value>>* = nullptr,
      _t<std::enable_if<
          !std::is_same<Indestructible<T>, remove_cvref_t<U>>::value>>* =
          nullptr,
      _t<std::enable_if<!std::is_convertible<U&&, T>::value>>* = nullptr>
  explicit constexpr Indestructible(U&& u) noexcept(
      noexcept(T(std::declval<U>())))
      : storage_(std::forward<U>(u)) {}
  template <
      typename U = T,
      _t<std::enable_if<std::is_constructible<T, U&&>::value>>* = nullptr,
      _t<std::enable_if<
          !std::is_same<Indestructible<T>, remove_cvref_t<U>>::value>>* =
          nullptr,
      _t<std::enable_if<std::is_convertible<U&&, T>::value>>* = nullptr>
  /* implicit */ constexpr Indestructible(U&& u) noexcept(
      noexcept(T(std::declval<U>())))
      : storage_(std::forward<U>(u)) {}

  template <typename... Args, typename = decltype(T(std::declval<Args>()...))>
  explicit constexpr Indestructible(Args&&... args) noexcept(
      std::is_nothrow_constructible<T, Args&&...>::value)
      : storage_(std::forward<Args>(args)...), inited_(true) {}
      noexcept(T(std::declval<Args>()...)))
      : storage_(std::forward<Args>(args)...) {}
  template <
      typename U,
      typename... Args,
      typename = decltype(
          T(std::declval<std::initializer_list<U>&>(),
            std::declval<Args>()...))>
  explicit constexpr Indestructible(std::initializer_list<U> il, Args... args) noexcept(
      noexcept(
          T(std::declval<std::initializer_list<U>&>(),
            std::declval<Args>()...)))
      : storage_(il, std::forward<Args>(args)...) {}

  ~Indestructible() = default;

@ -71,51 +121,57 @@ class Indestructible final {
  Indestructible& operator=(Indestructible const&) = delete;

  Indestructible(Indestructible&& other) noexcept(
      std::is_nothrow_move_constructible<T>::value)
      noexcept(T(std::declval<T>())))
      : storage_(std::move(other.storage_.value)) {
    other.inited_ = false;
    other.erased_ = true;
  }
  Indestructible& operator=(Indestructible&& other) noexcept(
      std::is_nothrow_move_assignable<T>::value) {
      noexcept(T(std::declval<T>()))) {
    storage_.value = std::move(other.storage_.value);
    other.inited_ = false;
    other.erased_ = true;
  }

  T* get() {
  T* get() noexcept {
    check();
    return &storage_.value;
  }
  T const* get() const {
  T const* get() const noexcept {
    check();
    return &storage_.value;
  }
  T& operator*() { return *get(); }
  T const& operator*() const { return *get(); }
  T* operator->() { return get(); }
  T const* operator->() const { return get(); }
  T& operator*() noexcept {
    return *get();
  }
  T const& operator*() const noexcept {
    return *get();
  }
  T* operator->() noexcept {
    return get();
  }
  T const* operator->() const noexcept {
    return get();
  }

 private:
  void check() const {
    if (UNLIKELY(!inited_)) {
      fail();
    }
  }

  [[noreturn]] FOLLY_NOINLINE static void fail() {
    LOG(FATAL) << "Indestructible is not initialized";
  void check() const noexcept {
    assert(!erased_);
  }

  union Storage {
    T value;

    template <typename... Args>
    explicit constexpr Storage(Args&&... args)
    template <typename S = T, typename = decltype(S())>
    constexpr Storage() noexcept(noexcept(T())) : value() {}

    template <typename... Args, typename = decltype(T(std::declval<Args>()...))>
    explicit constexpr Storage(Args&&... args) noexcept(
        noexcept(T(std::declval<Args>()...)))
        : value(std::forward<Args>(args)...) {}

    ~Storage() {}
  };

  Storage storage_;
  bool inited_{false};
  Storage storage_{};
  bool erased_{false};
};
}
} // namespace folly

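The rewrite swaps the inited_ flag and LOG(FATAL) check for an erased_ flag guarded by assert, and fleshes out the forwarding constructors. The intended use is unchanged and is shown in the class's own doc comment; a minimal function-local sketch (example code, not part of the commit):

#include <map>
#include <string>
#include <folly/Indestructible.h>

const std::map<std::string, int>& defaultSettings() {
  // Constructed once, never destroyed: avoids static-destruction-order
  // problems for function-local statics with non-trivial destructors.
  static const folly::Indestructible<std::map<std::string, int>> settings{
      std::map<std::string, int>{{"retries", 3}, {"timeout_ms", 500}}};
  return *settings;
}
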
291
ios/Pods/Folly/folly/IndexedMemPool.h
generated

@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2014-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -16,25 +16,88 @@

#pragma once

#include <type_traits>
#include <stdint.h>
#include <assert.h>
#include <errno.h>
#include <stdint.h>

#include <type_traits>

#include <boost/noncopyable.hpp>
#include <folly/AtomicStruct.h>
#include <folly/detail/CacheLocality.h>
#include <folly/Portability.h>
#include <folly/concurrency/CacheLocality.h>
#include <folly/portability/SysMman.h>
#include <folly/portability/Unistd.h>
#include <folly/synchronization/AtomicStruct.h>

// Ignore shadowing warnings within this file, so includers can use -Wshadow.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
FOLLY_PUSH_WARNING
FOLLY_GNU_DISABLE_WARNING("-Wshadow")

namespace folly {

namespace detail {
template <typename Pool>
struct IndexedMemPoolRecycler;
}
} // namespace detail

template <
    typename T,
    bool EagerRecycleWhenTrivial = false,
    bool EagerRecycleWhenNotTrivial = true>
struct IndexedMemPoolTraits {
  static constexpr bool eagerRecycle() {
    return std::is_trivial<T>::value ? EagerRecycleWhenTrivial
                                     : EagerRecycleWhenNotTrivial;
  }

  /// Called when the element pointed to by ptr is allocated for the
  /// first time.
  static void initialize(T* ptr) {
    if (!eagerRecycle()) {
      new (ptr) T();
    }
  }

  /// Called when the element pointed to by ptr is freed at the pool
  /// destruction time.
  static void cleanup(T* ptr) {
    if (!eagerRecycle()) {
      ptr->~T();
    }
  }

  /// Called when the element is allocated with the arguments forwarded from
  /// IndexedMemPool::allocElem.
  template <typename... Args>
  static void onAllocate(T* ptr, Args&&... args) {
    static_assert(
        sizeof...(Args) == 0 || eagerRecycle(),
        "emplace-style allocation requires eager recycle, "
        "which is defaulted only for non-trivial types");
    if (eagerRecycle()) {
      new (ptr) T(std::forward<Args>(args)...);
    }
  }

  /// Called when the element is recycled.
  static void onRecycle(T* ptr) {
    if (eagerRecycle()) {
      ptr->~T();
    }
  }
};

/// IndexedMemPool traits that implements the lazy lifecycle strategy. In this
/// strategy elements are default-constructed the first time they are allocated,
/// and destroyed when the pool itself is destroyed.
template <typename T>
using IndexedMemPoolTraitsLazyRecycle = IndexedMemPoolTraits<T, false, false>;

/// IndexedMemPool traits that implements the eager lifecycle strategy. In this
/// strategy elements are constructed when they are allocated from the pool and
/// destroyed when recycled.
template <typename T>
using IndexedMemPoolTraitsEagerRecycle = IndexedMemPoolTraits<T, true, true>;

/// Instances of IndexedMemPool dynamically allocate and then pool their
/// element type (T), returning 4-byte integer indices that can be passed
@ -53,13 +116,17 @@ struct IndexedMemPoolRecycler;
/// there won't be an ABA match due to the element being overwritten with
/// a different type that has the same bit pattern.
///
/// IndexedMemPool has two object lifecycle strategies. The first
/// is to construct objects when they are allocated from the pool and
/// destroy them when they are recycled. In this mode allocIndex and
/// allocElem have emplace-like semantics. In the second mode, objects
/// are default-constructed the first time they are removed from the pool,
/// and deleted when the pool itself is deleted. By default the first
/// mode is used for non-trivial T, and the second is used for trivial T.
/// The object lifecycle strategy is controlled by the Traits parameter.
/// One strategy, implemented by IndexedMemPoolTraitsEagerRecycle, is to
/// construct objects when they are allocated from the pool and destroy
/// them when they are recycled. In this mode allocIndex and allocElem
/// have emplace-like semantics. In another strategy, implemented by
/// IndexedMemPoolTraitsLazyRecycle, objects are default-constructed the
/// first time they are removed from the pool, and deleted when the pool
/// itself is deleted. By default the first mode is used for non-trivial
/// T, and the second is used for trivial T. Clients can customize the
/// object lifecycle by providing their own Traits implementation.
/// See IndexedMemPoolTraits for a Traits example.
///
/// IMPORTANT: Space for extra elements is allocated to account for those
/// that are inaccessible because they are in other local lists, so the
@ -83,12 +150,12 @@ struct IndexedMemPoolRecycler;
/// constructed, but delays element construction. This means that only
/// elements that are actually returned to the caller get paged into the
/// process's resident set (RSS).
template <typename T,
          int NumLocalLists_ = 32,
          int LocalListLimit_ = 200,
          template<typename> class Atom = std::atomic,
          bool EagerRecycleWhenTrivial = false,
          bool EagerRecycleWhenNotTrivial = true>
template <
    typename T,
    uint32_t NumLocalLists_ = 32,
    uint32_t LocalListLimit_ = 200,
    template <typename> class Atom = std::atomic,
    typename Traits = IndexedMemPoolTraits<T>>
struct IndexedMemPool : boost::noncopyable {
  typedef T value_type;

@ -98,45 +165,43 @@ struct IndexedMemPool : boost::noncopyable {
  static_assert(LocalListLimit_ <= 255, "LocalListLimit must fit in 8 bits");
  enum {
    NumLocalLists = NumLocalLists_,
    LocalListLimit = LocalListLimit_
    LocalListLimit = LocalListLimit_,
  };

  static constexpr bool eagerRecycle() {
    return std::is_trivial<T>::value
        ? EagerRecycleWhenTrivial : EagerRecycleWhenNotTrivial;
  }

  // these are public because clients may need to reason about the number
  // of bits required to hold indices from a pool, given its capacity

  static constexpr uint32_t maxIndexForCapacity(uint32_t capacity) {
    // index of uint32_t(-1) == UINT32_MAX is reserved for isAllocated tracking
    return std::min(uint64_t(capacity) + (NumLocalLists - 1) * LocalListLimit,
                    uint64_t(uint32_t(-1) - 1));
    // index of std::numeric_limits<uint32_t>::max() is reserved for isAllocated
    // tracking
    return uint32_t(std::min(
        uint64_t(capacity) + (NumLocalLists - 1) * LocalListLimit,
        uint64_t(std::numeric_limits<uint32_t>::max() - 1)));
  }

  static constexpr uint32_t capacityForMaxIndex(uint32_t maxIndex) {
    return maxIndex - (NumLocalLists - 1) * LocalListLimit;
  }

  /// Constructs a pool that can allocate at least _capacity_ elements,
  /// even if all the local lists are full
  explicit IndexedMemPool(uint32_t capacity)
      : actualCapacity_(maxIndexForCapacity(capacity))
      , size_(0)
      , globalHead_(TaggedPtr{})
  {
      : actualCapacity_(maxIndexForCapacity(capacity)),
        size_(0),
        globalHead_(TaggedPtr{}) {
    const size_t needed = sizeof(Slot) * (actualCapacity_ + 1);
    size_t pagesize = sysconf(_SC_PAGESIZE);
    size_t pagesize = size_t(sysconf(_SC_PAGESIZE));
    mmapLength_ = ((needed - 1) & ~(pagesize - 1)) + pagesize;
    assert(needed <= mmapLength_ && mmapLength_ < needed + pagesize);
    assert((mmapLength_ % pagesize) == 0);

    slots_ = static_cast<Slot*>(mmap(nullptr, mmapLength_,
                                     PROT_READ | PROT_WRITE,
                                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
    slots_ = static_cast<Slot*>(mmap(
        nullptr,
        mmapLength_,
        PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANONYMOUS,
        -1,
        0));
    if (slots_ == MAP_FAILED) {
      assert(errno == ENOMEM);
      throw std::bad_alloc();
@ -145,10 +210,8 @@ struct IndexedMemPool : boost::noncopyable {

  /// Destroys all of the contained elements
  ~IndexedMemPool() {
    if (!eagerRecycle()) {
      for (size_t i = size_; i > 0; --i) {
        slots_[i].~Slot();
      }
    for (uint32_t i = maxAllocatedIndex(); i > 0; --i) {
      Traits::cleanup(&slots_[i].elem);
    }
    munmap(slots_, mmapLength_);
  }
@ -157,30 +220,40 @@ struct IndexedMemPool : boost::noncopyable {
  /// simultaneously allocated and not yet recycled. Because of the
  /// local lists it is possible that more elements than this are returned
  /// successfully
  size_t capacity() {
  uint32_t capacity() {
    return capacityForMaxIndex(actualCapacity_);
  }

  /// Returns the maximum index of elements ever allocated in this pool
  /// including elements that have been recycled.
  uint32_t maxAllocatedIndex() const {
    // Take the minimum since it is possible that size_ > actualCapacity_.
    // This can happen if there are multiple concurrent requests
    // when size_ == actualCapacity_ - 1.
    return std::min(uint32_t(size_), uint32_t(actualCapacity_));
  }

  /// Finds a slot with a non-zero index, emplaces a T there if we're
  /// using the eager recycle lifecycle mode, and returns the index,
  /// or returns 0 if no elements are available.
  template <typename ...Args>
  /// or returns 0 if no elements are available. Passes a pointer to
  /// the element to Traits::onAllocate before the slot is marked as
  /// allocated.
  template <typename... Args>
  uint32_t allocIndex(Args&&... args) {
    static_assert(sizeof...(Args) == 0 || eagerRecycle(),
        "emplace-style allocation requires eager recycle, "
        "which is defaulted only for non-trivial types");
    auto idx = localPop(localHead());
    if (idx != 0 && eagerRecycle()) {
      T* ptr = &slot(idx).elem;
      new (ptr) T(std::forward<Args>(args)...);
    if (idx != 0) {
      Slot& s = slot(idx);
      Traits::onAllocate(&s.elem, std::forward<Args>(args)...);
      markAllocated(s);
    }
    return idx;
  }

  /// If an element is available, returns a std::unique_ptr to it that will
  /// recycle the element to the pool when it is reclaimed, otherwise returns
  /// a null (falsy) std::unique_ptr
  template <typename ...Args>
  /// a null (falsy) std::unique_ptr. Passes a pointer to the element to
  /// Traits::onAllocate before the slot is marked as allocated.
  template <typename... Args>
  UniquePtr allocElem(Args&&... args) {
    auto idx = allocIndex(std::forward<Args>(args)...);
    T* ptr = idx == 0 ? nullptr : &slot(idx).elem;
@ -190,9 +263,6 @@ struct IndexedMemPool : boost::noncopyable {
  /// Gives up ownership previously granted by alloc()
  void recycleIndex(uint32_t idx) {
    assert(isAllocated(idx));
    if (eagerRecycle()) {
      slot(idx).elem.~T();
    }
    localPush(localHead(), idx);
  }

@ -217,7 +287,7 @@ struct IndexedMemPool : boost::noncopyable {

    auto slot = reinterpret_cast<const Slot*>(
        reinterpret_cast<const char*>(elem) - offsetof(Slot, elem));
    auto rv = slot - slots_;
    auto rv = uint32_t(slot - slots_);

    // this assert also tests that rv is in range
    assert(elem == &(*this)[rv]);
@ -226,17 +296,16 @@ struct IndexedMemPool : boost::noncopyable {

  /// Returns true iff idx has been alloc()ed and not recycleIndex()ed
  bool isAllocated(uint32_t idx) const {
    return slot(idx).localNext == uint32_t(-1);
    return slot(idx).localNext.load(std::memory_order_acquire) == uint32_t(-1);
  }

 private:
  ///////////// types

  struct Slot {
    T elem;
    uint32_t localNext;
    uint32_t globalNext;
    Atom<uint32_t> localNext;
    Atom<uint32_t> globalNext;

    Slot() : localNext{}, globalNext{} {}
  };
@ -250,9 +319,9 @@ struct IndexedMemPool : boost::noncopyable {
    uint32_t tagAndSize;

    enum : uint32_t {
      SizeBits = 8,
      SizeMask = (1U << SizeBits) - 1,
      TagIncr = 1U << SizeBits,
      SizeBits = 8,
      SizeMask = (1U << SizeBits) - 1,
      TagIncr = 1U << SizeBits,
    };

    uint32_t size() const {
@ -261,21 +330,21 @@ struct IndexedMemPool : boost::noncopyable {

    TaggedPtr withSize(uint32_t repl) const {
      assert(repl <= LocalListLimit);
      return TaggedPtr{ idx, (tagAndSize & ~SizeMask) | repl };
      return TaggedPtr{idx, (tagAndSize & ~SizeMask) | repl};
    }

    TaggedPtr withSizeIncr() const {
      assert(size() < LocalListLimit);
      return TaggedPtr{ idx, tagAndSize + 1 };
      return TaggedPtr{idx, tagAndSize + 1};
    }

    TaggedPtr withSizeDecr() const {
      assert(size() > 0);
      return TaggedPtr{ idx, tagAndSize - 1 };
      return TaggedPtr{idx, tagAndSize - 1};
    }

    TaggedPtr withIdx(uint32_t repl) const {
      return TaggedPtr{ repl, tagAndSize + TagIncr };
      return TaggedPtr{repl, tagAndSize + TagIncr};
    }

    TaggedPtr withEmpty() const {
@ -283,23 +352,23 @@ struct IndexedMemPool : boost::noncopyable {
    }
  };

  struct FOLLY_ALIGN_TO_AVOID_FALSE_SHARING LocalList {
    AtomicStruct<TaggedPtr,Atom> head;
  struct alignas(hardware_destructive_interference_size) LocalList {
    AtomicStruct<TaggedPtr, Atom> head;

    LocalList() : head(TaggedPtr{}) {}
  };

  ////////// fields

  /// the number of bytes allocated from mmap, which is a multiple of
  /// the page size of the machine
  size_t mmapLength_;

  /// the actual number of slots that we will allocate, to guarantee
  /// that we will satisfy the capacity requested at construction time.
  /// They will be numbered 1..actualCapacity_ (note the 1-based counting),
  /// and occupy slots_[1..actualCapacity_].
  size_t actualCapacity_;

  /// the number of bytes allocated from mmap, which is a multiple of
  /// the page size of the machine
  size_t mmapLength_;
  uint32_t actualCapacity_;

  /// this records the number of slots that have actually been constructed.
  /// To allow use of atomic ++ instead of CAS, we let this overflow.
@ -309,7 +378,7 @@ struct IndexedMemPool : boost::noncopyable {

  /// raw storage, only 1..min(size_,actualCapacity_) (inclusive) are
  /// actually constructed. Note that slots_[0] is not constructed or used
  FOLLY_ALIGN_TO_AVOID_FALSE_SHARING Slot* slots_;
  alignas(hardware_destructive_interference_size) Slot* slots_;

  /// use AccessSpreader to find your list. We use stripes instead of
  /// thread-local to avoid the need to grow or shrink on thread start
@ -318,14 +387,15 @@ struct IndexedMemPool : boost::noncopyable {

  /// this is the head of a list of node chained by globalNext, that are
  /// themselves each the head of a list chained by localNext
  FOLLY_ALIGN_TO_AVOID_FALSE_SHARING AtomicStruct<TaggedPtr,Atom> globalHead_;
  alignas(hardware_destructive_interference_size)
      AtomicStruct<TaggedPtr, Atom> globalHead_;

  ///////////// private methods

  size_t slotIndex(uint32_t idx) const {
    assert(0 < idx &&
           idx <= actualCapacity_ &&
           idx <= size_.load(std::memory_order_acquire));
  uint32_t slotIndex(uint32_t idx) const {
    assert(
        0 < idx && idx <= actualCapacity_ &&
        idx <= size_.load(std::memory_order_acquire));
    return idx;
  }

@ -342,7 +412,7 @@ struct IndexedMemPool : boost::noncopyable {
  void globalPush(Slot& s, uint32_t localHead) {
    while (true) {
      TaggedPtr gh = globalHead_.load(std::memory_order_acquire);
      s.globalNext = gh.idx;
      s.globalNext.store(gh.idx, std::memory_order_relaxed);
      if (globalHead_.compare_exchange_strong(gh, gh.withIdx(localHead))) {
        // success
        return;
@ -351,11 +421,12 @@ struct IndexedMemPool : boost::noncopyable {
  }

  // idx references a single node
  void localPush(AtomicStruct<TaggedPtr,Atom>& head, uint32_t idx) {
  void localPush(AtomicStruct<TaggedPtr, Atom>& head, uint32_t idx) {
    Slot& s = slot(idx);
    TaggedPtr h = head.load(std::memory_order_acquire);
    while (true) {
      s.localNext = h.idx;
      s.localNext.store(h.idx, std::memory_order_release);
      Traits::onRecycle(&slot(idx).elem);

      if (h.size() == LocalListLimit) {
        // push will overflow local list, steal it instead
@ -379,8 +450,11 @@ struct IndexedMemPool : boost::noncopyable {
  uint32_t globalPop() {
    while (true) {
      TaggedPtr gh = globalHead_.load(std::memory_order_acquire);
      if (gh.idx == 0 || globalHead_.compare_exchange_strong(
              gh, gh.withIdx(slot(gh.idx).globalNext))) {
      if (gh.idx == 0 ||
          globalHead_.compare_exchange_strong(
              gh,
              gh.withIdx(
                  slot(gh.idx).globalNext.load(std::memory_order_relaxed)))) {
        // global list is empty, or pop was successful
        return gh.idx;
      }
@ -388,16 +462,15 @@ struct IndexedMemPool : boost::noncopyable {
  }

  // returns 0 if allocation failed
  uint32_t localPop(AtomicStruct<TaggedPtr,Atom>& head) {
  uint32_t localPop(AtomicStruct<TaggedPtr, Atom>& head) {
    while (true) {
      TaggedPtr h = head.load(std::memory_order_acquire);
      if (h.idx != 0) {
        // local list is non-empty, try to pop
        Slot& s = slot(h.idx);
        if (head.compare_exchange_strong(
                h, h.withIdx(s.localNext).withSizeDecr())) {
        auto next = s.localNext.load(std::memory_order_relaxed);
        if (head.compare_exchange_strong(h, h.withIdx(next).withSizeDecr())) {
          // success
          s.localNext = uint32_t(-1);
          return h.idx;
        }
        continue;
@ -411,21 +484,15 @@ struct IndexedMemPool : boost::noncopyable {
        // allocation failed
        return 0;
      }
      // default-construct it now if we aren't going to construct and
      // destroy on each allocation
      if (!eagerRecycle()) {
        T* ptr = &slot(idx).elem;
        new (ptr) T();
      }
      slot(idx).localNext = uint32_t(-1);
      Traits::initialize(&slot(idx).elem);
      return idx;
    }

    Slot& s = slot(idx);
    auto next = s.localNext.load(std::memory_order_relaxed);
    if (head.compare_exchange_strong(
            h, h.withIdx(s.localNext).withSize(LocalListLimit))) {
            h, h.withIdx(next).withSize(LocalListLimit))) {
      // global list moved to local list, keep head for us
      s.localNext = uint32_t(-1);
      return idx;
    }
    // local bulk push failed, return idx to the global list and try again
@ -433,10 +500,17 @@ struct IndexedMemPool : boost::noncopyable {
    }
  }

  AtomicStruct<TaggedPtr,Atom>& localHead() {
    auto stripe = detail::AccessSpreader<Atom>::current(NumLocalLists);
  AtomicStruct<TaggedPtr, Atom>& localHead() {
    auto stripe = AccessSpreader<Atom>::current(NumLocalLists);
    return local_[stripe].head;
  }

  void markAllocated(Slot& slot) {
    slot.localNext.store(uint32_t(-1), std::memory_order_release);
  }

 public:
  static constexpr std::size_t kSlotSize = sizeof(Slot);
};

namespace detail {
@ -450,18 +524,17 @@ struct IndexedMemPoolRecycler {

  explicit IndexedMemPoolRecycler(Pool* pool) : pool(pool) {}

  IndexedMemPoolRecycler(const IndexedMemPoolRecycler<Pool>& rhs)
      = default;
  IndexedMemPoolRecycler& operator= (const IndexedMemPoolRecycler<Pool>& rhs)
      = default;
  IndexedMemPoolRecycler(const IndexedMemPoolRecycler<Pool>& rhs) = default;
  IndexedMemPoolRecycler& operator=(const IndexedMemPoolRecycler<Pool>& rhs) =
      default;

  void operator()(typename Pool::value_type* elem) const {
    pool->recycleIndex(pool->locateElem(elem));
  }
};

}
} // namespace detail

} // namespace folly

# pragma GCC diagnostic pop
FOLLY_POP_WARNING

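The core of this rewrite: the EagerRecycleWhenTrivial/NotTrivial bools become a pluggable Traits parameter (IndexedMemPoolTraitsEagerRecycle and IndexedMemPoolTraitsLazyRecycle are the stock strategies), and the free-list links become Atom<uint32_t> with explicit memory orders. A hedged sketch of the emplace-style default for a non-trivial element type, using only members shown above (example code, not part of the commit):

#include <string>
#include <folly/IndexedMemPool.h>

void poolRoundTrip() {
  // std::string is non-trivial, so the default Traits recycle eagerly:
  // allocIndex() placement-constructs, recycleIndex() destroys.
  folly::IndexedMemPool<std::string> pool(1024);

  uint32_t idx = pool.allocIndex("hello"); // 0 means the pool is exhausted
  if (idx != 0) {
    pool[idx] += ", world";  // 4-byte indices, not pointers, name elements
    pool.recycleIndex(idx);  // slot returns to a stripe-local free list
  }
}
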
12
ios/Pods/Folly/folly/IntrusiveList.h
generated

@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2011-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -29,7 +29,7 @@ namespace folly {
 * An auto-unlink intrusive list hook.
 */
using IntrusiveListHook = boost::intrusive::list_member_hook<
  boost::intrusive::link_mode<boost::intrusive::auto_unlink>>;
    boost::intrusive::link_mode<boost::intrusive::auto_unlink>>;

/**
 * An intrusive list.
@ -61,7 +61,7 @@ using IntrusiveListHook = boost::intrusive::list_member_hook<
 * The elements stored in the list must contain an IntrusiveListHook member
 * variable.
 */
template<typename T, IntrusiveListHook T::* PtrToMember>
template <typename T, IntrusiveListHook T::*PtrToMember>
using IntrusiveList = boost::intrusive::list<
    T,
    boost::intrusive::member_hook<T, IntrusiveListHook, PtrToMember>,
@ -71,7 +71,7 @@ using IntrusiveList = boost::intrusive::list<
 * A safe-link intrusive list hook.
 */
using SafeIntrusiveListHook = boost::intrusive::list_member_hook<
  boost::intrusive::link_mode<boost::intrusive::safe_link>>;
    boost::intrusive::link_mode<boost::intrusive::safe_link>>;

/**
 * An intrusive list with const-time size() method.
@ -109,10 +109,10 @@ using SafeIntrusiveListHook = boost::intrusive::list_member_hook<
 * The elements stored in the list must contain an SafeIntrusiveListHook member
 * variable.
 */
template<typename T, SafeIntrusiveListHook T::* PtrToMember>
template <typename T, SafeIntrusiveListHook T::*PtrToMember>
using CountedIntrusiveList = boost::intrusive::list<
    T,
    boost::intrusive::member_hook<T, SafeIntrusiveListHook, PtrToMember>,
    boost::intrusive::constant_time_size<true>>;

} // folly
} // namespace folly

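Only template spacing and namespace comments change here, but the aliases deserve a usage note: the hook lives inside the element, so the list itself allocates nothing. A minimal sketch with the auto-unlink hook (example code, not part of the commit):

#include <folly/IntrusiveList.h>

struct Task {
  int id = 0;
  folly::IntrusiveListHook hook; // linkage storage lives in the element
};

void intrusiveDemo() {
  folly::IntrusiveList<Task, &Task::hook> queue;
  Task t1, t2;
  queue.push_back(t1);
  queue.push_back(t2);
  // auto_unlink mode: each Task unlinks itself on destruction, so t1/t2
  // leaving scope cannot leave dangling nodes behind.
}
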
65
ios/Pods/Folly/folly/Lazy.h
generated

@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2013-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -16,10 +16,11 @@

#pragma once

#include <utility>
#include <type_traits>
#include <utility>

#include <folly/Optional.h>
#include <folly/functional/Invoke.h>

namespace folly {

@ -86,48 +87,58 @@ namespace folly {

namespace detail {

template<class Func>
template <class Func>
struct Lazy {
  typedef typename std::result_of<Func()>::type result_type;
  typedef invoke_result_t<Func> result_type;

  static_assert(
      !std::is_const<Func>::value,
      "Func should not be a const-qualified type");
  static_assert(
      !std::is_reference<Func>::value,
      "Func should not be a reference type");

  explicit Lazy(Func&& f) : func_(std::move(f)) {}
  explicit Lazy(Func& f) : func_(f) {}
  explicit Lazy(const Func& f) : func_(f) {}

  Lazy(Lazy&& o)
      : value_(std::move(o.value_))
      , func_(std::move(o.func_))
  {}
  Lazy(Lazy&& o) : value_(std::move(o.value_)), func_(std::move(o.func_)) {}

  Lazy(const Lazy&) = delete;
  Lazy& operator=(const Lazy&) = delete;
  Lazy& operator=(Lazy&&) = delete;

  const result_type& operator()() const {
    return const_cast<Lazy&>(*this)();
  }
    ensure_initialized();

  result_type& operator()() {
    if (!value_) value_ = func_();
    return *value_;
  }

 private:
  Optional<result_type> value_;
  Func func_;
  result_type& operator()() {
    ensure_initialized();

    return *value_;
  }

 private:
  void ensure_initialized() const {
    if (!value_) {
      value_ = func_();
    }
  }

  mutable Optional<result_type> value_;
  mutable Func func_;
};

} // namespace detail

//////////////////////////////////////////////////////////////////////

template <class Func>
auto lazy(Func&& fun) {
  return detail::Lazy<remove_cvref_t<Func>>(std::forward<Func>(fun));
}

//////////////////////////////////////////////////////////////////////

template<class Func>
detail::Lazy<typename std::remove_reference<Func>::type>
lazy(Func&& fun) {
  return detail::Lazy<typename std::remove_reference<Func>::type>(
    std::forward<Func>(fun)
  );
}

//////////////////////////////////////////////////////////////////////

}
} // namespace folly

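Besides formatting, the const call operator becomes genuinely usable (both overloads now share ensure_initialized() over mutable members) and deduction moves to invoke_result_t / remove_cvref_t. A minimal sketch of the factory as its doc comment describes it (example code, not part of the commit):

#include <folly/Lazy.h>

void lazyDemo() {
  auto answer = folly::lazy([] { return 6 * 7; });
  // Nothing has run yet; the first call computes and caches the result.
  int first = answer();
  int second = answer(); // cached: the lambda is not invoked again
  (void)first;
  (void)second;
}
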
606
ios/Pods/Folly/folly/LifoSem.h
generated

@ -1,606 +0,0 @@
/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <string.h>
#include <stdint.h>
#include <atomic>
#include <algorithm>
#include <memory>
#include <system_error>

#include <folly/AtomicStruct.h>
#include <folly/Baton.h>
#include <folly/IndexedMemPool.h>
#include <folly/Likely.h>
#include <folly/detail/CacheLocality.h>

namespace folly {

template <template<typename> class Atom = std::atomic,
          class BatonType = Baton<Atom>>
struct LifoSemImpl;

/// LifoSem is a semaphore that wakes its waiters in a manner intended to
/// maximize performance rather than fairness.  It should be preferred
/// to a mutex+condvar or POSIX sem_t solution when all of the waiters
/// are equivalent.  It is faster than a condvar or sem_t, and it has a
/// shutdown state that might save you a lot of complexity when it comes
/// time to shut down your work pipelines.  LifoSem is larger than sem_t,
/// but that is only because it uses padding and alignment to avoid
/// false sharing.
///
/// LifoSem allows multi-post and multi-tryWait, and provides a shutdown
/// state that awakens all waiters.  LifoSem is faster than sem_t because
/// it performs exact wakeups, so it often requires fewer system calls.
/// It provides all of the functionality of sem_t except for timed waiting.
/// It is called LifoSem because its wakeup policy is approximately LIFO,
/// rather than the usual FIFO.
///
/// The core semaphore operations provided are:
///
/// -- post() -- if there is a pending waiter, wake it up, otherwise
/// increment the value of the semaphore.  If the value of the semaphore
/// is already 2^32-1, does nothing.  Compare to sem_post().
///
/// -- post(n) -- equivalent to n calls to post(), but much more efficient.
/// sem_t has no equivalent to this method.
///
/// -- bool tryWait() -- if the semaphore's value is positive, decrements it
/// and returns true, otherwise returns false.  Compare to sem_trywait().
///
/// -- uint32_t tryWait(uint32_t n) -- attempts to decrement the semaphore's
/// value by n, returning the amount by which it actually was decremented
/// (a value from 0 to n inclusive).  Not atomic.  Equivalent to n calls
/// to tryWait().  sem_t has no equivalent to this method.
///
/// -- wait() -- waits until tryWait() can succeed.  Compare to sem_wait().
///
/// LifoSem also has the notion of a shutdown state, in which any calls
/// that would block (or are already blocked) throw ShutdownSemError.
/// Note the difference between a call to wait() and a call to wait()
/// that might block.  In the former case tryWait() would succeed, and no
/// isShutdown() check is performed.  In the latter case an exception is
/// thrown.  This behavior allows a LifoSem controlling work distribution
/// to drain.  If you want to immediately stop all waiting on shutdown,
/// you can just check isShutdown() yourself (preferrably wrapped in
/// an UNLIKELY).  This fast-stop behavior is easy to add, but difficult
/// to remove if you want the draining behavior, which is why we have
/// chosen the former.  Since wait() is the only method that can block,
/// it is the only one that is affected by the shutdown state.
///
/// All LifoSem operations operations except valueGuess() are guaranteed
/// to be linearizable.
typedef LifoSemImpl<> LifoSem;

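Note that this upgrade deletes folly/LifoSem.h from the bundled Pod outright (the @ -1,606 +0,0 @@ hunk). The doc comment above still describes the contract, so a hedged sketch of the documented post/wait protocol follows, assuming a Folly build that still ships LifoSem at this path (example code, not part of the commit):

#include <cassert>
#include <thread>
#include <folly/LifoSem.h> // path as it existed before this commit

void handOneUnitToAWaiter() {
  folly::LifoSem sem;
  std::thread waiter([&] {
    sem.wait(); // blocks until a unit is available
  });
  sem.post();   // wakes the waiter (approximately LIFO among waiters)
  waiter.join();
  assert(!sem.tryWait()); // non-blocking: the single unit was consumed
}
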
/// The exception thrown when wait()ing on an isShutdown() LifoSem
|
||||
struct ShutdownSemError : public std::runtime_error {
|
||||
explicit ShutdownSemError(const std::string& msg);
|
||||
virtual ~ShutdownSemError() noexcept;
|
||||
};
|
||||
|
||||
namespace detail {
|
||||
|
||||
// Internally, a LifoSem is either a value or a linked list of wait nodes.
|
||||
// This union is captured in the LifoSemHead type, which holds either a
|
||||
// value or an indexed pointer to the list. LifoSemHead itself is a value
|
||||
// type, the head is a mutable atomic box containing a LifoSemHead value.
|
||||
// Each wait node corresponds to exactly one waiter. Values can flow
|
||||
// through the semaphore either by going into and out of the head's value,
|
||||
// or by direct communication from a poster to a waiter. The former path
|
||||
// is taken when there are no pending waiters, the latter otherwise. The
|
||||
// general flow of a post is to try to increment the value or pop-and-post
|
||||
// a wait node. Either of those have the effect of conveying one semaphore
|
||||
// unit. Waiting is the opposite, either a decrement of the value or
|
||||
// push-and-wait of a wait node. The generic LifoSemBase abstracts the
|
||||
// actual mechanism by which a wait node's post->wait communication is
|
||||
// performed, which is why we have LifoSemRawNode and LifoSemNode.
|
||||
|
||||
/// LifoSemRawNode is the actual pooled storage that backs LifoSemNode
|
||||
/// for user-specified Handoff types. This is done so that we can have
|
||||
/// a large static IndexedMemPool of nodes, instead of per-type pools
|
||||
template <template<typename> class Atom>
|
||||
struct LifoSemRawNode {
|
||||
std::aligned_storage<sizeof(void*),alignof(void*)>::type raw;
|
||||
|
||||
/// The IndexedMemPool index of the next node in this chain, or 0
|
||||
/// if none. This will be set to uint32_t(-1) if the node is being
|
||||
/// posted due to a shutdown-induced wakeup
|
||||
uint32_t next;
|
||||
|
||||
bool isShutdownNotice() const { return next == uint32_t(-1); }
|
||||
void clearShutdownNotice() { next = 0; }
|
||||
void setShutdownNotice() { next = uint32_t(-1); }
|
||||
|
||||
typedef folly::IndexedMemPool<LifoSemRawNode<Atom>,32,200,Atom> Pool;
|
||||
|
||||
/// Storage for all of the waiter nodes for LifoSem-s that use Atom
|
||||
static Pool& pool();
|
||||
};
|
||||
|
||||
/// Use this macro to declare the static storage that backs the raw nodes
|
||||
/// for the specified atomic type
|
||||
#define LIFOSEM_DECLARE_POOL(Atom, capacity) \
|
||||
namespace folly { \
|
||||
namespace detail { \
|
||||
template <> \
|
||||
LifoSemRawNode<Atom>::Pool& LifoSemRawNode<Atom>::pool() { \
|
||||
static Pool* instance = new Pool((capacity)); \
|
||||
return *instance; \
|
||||
} \
|
||||
} \
|
||||
}
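A program that uses LifoSem with a given Atom must instantiate this storage exactly once. A minimal sketch for the default std::atomic case (the capacity of 100000 is an arbitrary illustration, not a value required by this header):

    LIFOSEM_DECLARE_POOL(std::atomic, 100000)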

/// Handoff is a type not bigger than a void* that knows how to perform a
/// single post() -> wait() communication. It must have a post() method.
/// If it has a wait() method then LifoSemBase's wait() implementation
/// will work out of the box, otherwise you will need to specialize
/// LifoSemBase::wait accordingly.
template <typename Handoff, template<typename> class Atom>
struct LifoSemNode : public LifoSemRawNode<Atom> {

  static_assert(sizeof(Handoff) <= sizeof(LifoSemRawNode<Atom>::raw),
      "Handoff too big for small-object optimization, use indirection");
  static_assert(alignof(Handoff) <=
                alignof(decltype(LifoSemRawNode<Atom>::raw)),
      "Handoff alignment constraint not satisfied");

  template <typename ...Args>
  void init(Args&&... args) {
    new (&this->raw) Handoff(std::forward<Args>(args)...);
  }

  void destroy() {
    handoff().~Handoff();
#ifndef NDEBUG
    memset(&this->raw, 'F', sizeof(this->raw));
#endif
  }

  Handoff& handoff() {
    return *static_cast<Handoff*>(static_cast<void*>(&this->raw));
  }

  const Handoff& handoff() const {
    return *static_cast<const Handoff*>(static_cast<const void*>(&this->raw));
  }
};
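For illustration only, a Handoff that satisfies the constraints above (fits in a void*, provides post() and wait()) could be as simple as a spinning flag. The real LifoSem uses a Baton (see LifoSemImpl at the end of this file); this sketch just shows the required shape, and needs <atomic> and <thread>:

    struct SpinHandoff {
      std::atomic<uint32_t> posted{0};     // 4 bytes <= sizeof(void*)
      void post() { posted.store(1, std::memory_order_release); }
      void wait() {
        while (posted.load(std::memory_order_acquire) == 0) {
          std::this_thread::yield();       // stand-in for a real thread park
        }
      }
    };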

template <typename Handoff, template<typename> class Atom>
struct LifoSemNodeRecycler {
  void operator()(LifoSemNode<Handoff,Atom>* elem) const {
    elem->destroy();
    auto idx = LifoSemRawNode<Atom>::pool().locateElem(elem);
    LifoSemRawNode<Atom>::pool().recycleIndex(idx);
  }
};

/// LifoSemHead is a 64-bit struct that holds a 32-bit value, some state
/// bits, and a sequence number used to avoid ABA problems in the lock-free
/// management of the LifoSem's wait lists. The value can either hold
/// an integral semaphore value (if there are no waiters) or a node index
/// (see IndexedMemPool) for the head of a list of wait nodes
class LifoSemHead {
  // What we really want are bitfields:
  //  uint64_t data : 32; uint64_t isNodeIdx : 1; uint64_t seq : 31;
  // Unfortunately g++ generates pretty bad code for this sometimes (I saw
  // -O3 code from gcc 4.7.1 copying the bitfields one at a time instead of
  // in bulk, for example). We can generate better code anyway by assuming
  // that setters won't be given values that cause under/overflow, and
  // putting the sequence at the end where its planned overflow doesn't
  // need any masking.
  //
  // data == 0 (empty list) with isNodeIdx is conceptually the same
  // as data == 0 (no unclaimed increments) with !isNodeIdx, so we always
  // convert the former into the latter to make the logic simpler.
  enum {
    IsNodeIdxShift = 32,
    IsShutdownShift = 33,
    SeqShift = 34,
  };
  enum : uint64_t {
    IsNodeIdxMask = uint64_t(1) << IsNodeIdxShift,
    IsShutdownMask = uint64_t(1) << IsShutdownShift,
    SeqIncr = uint64_t(1) << SeqShift,
    SeqMask = ~(SeqIncr - 1),
  };
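  // Worked example (editor's illustration, not from the original header):
  // a head holding value 5 with no waiter list, not shut down, and sequence
  // number 2 is encoded as
  //   bits == (uint64_t(2) << SeqShift) | 5 == 0x0000000800000005,
  // since the low 32 bits carry the data and the seq field starts at bit 34.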

 public:

  uint64_t bits;

  //////// getters

  inline uint32_t idx() const {
    assert(isNodeIdx());
    assert(uint32_t(bits) != 0);
    return uint32_t(bits);
  }
  inline uint32_t value() const {
    assert(!isNodeIdx());
    return uint32_t(bits);
  }
  inline constexpr bool isNodeIdx() const {
    return (bits & IsNodeIdxMask) != 0;
  }
  inline constexpr bool isShutdown() const {
    return (bits & IsShutdownMask) != 0;
  }
  inline constexpr uint32_t seq() const {
    return uint32_t(bits >> SeqShift);
  }

  //////// setter-like things return a new struct

  /// This should only be used for initial construction, not for setting
  /// the value, because it clears the sequence number
  static inline constexpr LifoSemHead fresh(uint32_t value) {
    return LifoSemHead{ value };
  }

  /// Returns the LifoSemHead that results from popping a waiter node,
  /// given the current waiter node's next ptr
  inline LifoSemHead withPop(uint32_t idxNext) const {
    assert(isNodeIdx());
    if (idxNext == 0) {
      // no isNodeIdx bit or data bits. Wraparound of seq bits is okay
      return LifoSemHead{ (bits & (SeqMask | IsShutdownMask)) + SeqIncr };
    } else {
      // preserve sequence bits (incremented with wraparound okay) and
      // isNodeIdx bit, replace all data bits
      return LifoSemHead{
          (bits & (SeqMask | IsShutdownMask | IsNodeIdxMask)) +
          SeqIncr + idxNext };
    }
  }

  /// Returns the LifoSemHead that results from pushing a new waiter node
  inline LifoSemHead withPush(uint32_t _idx) const {
    assert(isNodeIdx() || value() == 0);
    assert(!isShutdown());
    assert(_idx != 0);
    return LifoSemHead{ (bits & SeqMask) | IsNodeIdxMask | _idx };
  }

  /// Returns the LifoSemHead with value increased by delta, with
  /// saturation if the maximum value is reached
  inline LifoSemHead withValueIncr(uint32_t delta) const {
    assert(!isNodeIdx());
    auto rv = LifoSemHead{ bits + SeqIncr + delta };
    if (UNLIKELY(rv.isNodeIdx())) {
      // value has overflowed into the isNodeIdx bit
      rv = LifoSemHead{ (rv.bits & ~IsNodeIdxMask) | (IsNodeIdxMask - 1) };
    }
    return rv;
  }

  /// Returns the LifoSemHead that results from decrementing the value
  inline LifoSemHead withValueDecr(uint32_t delta) const {
    assert(delta > 0 && delta <= value());
    return LifoSemHead{ bits + SeqIncr - delta };
  }

  /// Returns the LifoSemHead with the same state as the current node,
  /// but with the shutdown bit set
  inline LifoSemHead withShutdown() const {
    return LifoSemHead{ bits | IsShutdownMask };
  }

  inline constexpr bool operator== (const LifoSemHead& rhs) const {
    return bits == rhs.bits;
  }
  inline constexpr bool operator!= (const LifoSemHead& rhs) const {
    return !(*this == rhs);
  }
};

/// LifoSemBase is the engine for several different types of LIFO
/// semaphore. LifoSemBase handles storage of positive semaphore values
/// and wait nodes, but the actual waiting and notification mechanism is
/// up to the client.
///
/// The Handoff type is responsible for arranging one wakeup notification.
/// See LifoSemNode for more information on how to make your own.
template <typename Handoff,
          template<typename> class Atom = std::atomic>
struct LifoSemBase {

  /// Constructor
  constexpr explicit LifoSemBase(uint32_t initialValue = 0)
      : head_(LifoSemHead::fresh(initialValue)), padding_() {}

  LifoSemBase(LifoSemBase const&) = delete;
  LifoSemBase& operator=(LifoSemBase const&) = delete;

  /// Silently saturates if value is already 2^32-1
  void post() {
    auto idx = incrOrPop(1);
    if (idx != 0) {
      idxToNode(idx).handoff().post();
    }
  }

  /// Equivalent to n calls to post(), except may be much more efficient.
  /// At any point in time at which the semaphore's value would exceed
  /// 2^32-1 if tracked with infinite precision, it may be silently
  /// truncated to 2^32-1. This saturation is not guaranteed to be exact,
  /// although it is guaranteed that overflow won't result in wrap-around.
  /// There would be a substantial performance and complexity cost in
  /// guaranteeing exact saturation (similar to the cost of maintaining
  /// linearizability near the zero value, but without as much of
  /// a benefit).
  void post(uint32_t n) {
    uint32_t idx;
    while (n > 0 && (idx = incrOrPop(n)) != 0) {
      // pop accounts for only 1
      idxToNode(idx).handoff().post();
      --n;
    }
  }

  /// Returns true iff shutdown() has been called
  bool isShutdown() const {
    return UNLIKELY(head_.load(std::memory_order_acquire).isShutdown());
  }

  /// Prevents blocking on this semaphore, causing all blocking wait()
  /// calls to throw ShutdownSemError. Both currently blocked wait() and
  /// future calls to wait() for which tryWait() would return false will
  /// cause an exception. Calls to wait() for which the matching post()
  /// has already occurred will proceed normally.
  void shutdown() {
    // first set the shutdown bit
    auto h = head_.load(std::memory_order_acquire);
    while (!h.isShutdown()) {
      if (head_.compare_exchange_strong(h, h.withShutdown())) {
        // success
        h = h.withShutdown();
        break;
      }
      // compare_exchange_strong rereads h, retry
    }

    // now wake up any waiters
    while (h.isNodeIdx()) {
      auto& node = idxToNode(h.idx());
      auto repl = h.withPop(node.next);
      if (head_.compare_exchange_strong(h, repl)) {
        // successful pop, wake up the waiter and move on. The next
        // field is used to convey that this wakeup didn't consume a value
        node.setShutdownNotice();
        node.handoff().post();
        h = repl;
      }
    }
  }

  /// Returns true iff value was decremented
  bool tryWait() {
    uint32_t n = 1;
    auto rv = decrOrPush(n, 0);
    assert((rv == WaitResult::DECR && n == 0) ||
           (rv != WaitResult::DECR && n == 1));
    // SHUTDOWN is okay here, since we don't actually wait
    return rv == WaitResult::DECR;
  }

  /// Equivalent to (but may be much more efficient than) n calls to
  /// tryWait(). Returns the total amount by which the semaphore's value
  /// was decreased
  uint32_t tryWait(uint32_t n) {
    auto const orig = n;
    while (n > 0) {
#ifndef NDEBUG
      auto prev = n;
#endif
      auto rv = decrOrPush(n, 0);
      assert((rv == WaitResult::DECR && n < prev) ||
             (rv != WaitResult::DECR && n == prev));
      if (rv != WaitResult::DECR) {
        break;
      }
    }
    return orig - n;
  }

  /// Blocks the current thread until there is a matching post or the
  /// semaphore is shut down. Throws ShutdownSemError if the semaphore
  /// has been shut down and this method would otherwise be blocking.
  /// Note that wait() doesn't throw during shutdown if tryWait() would
  /// return true
  void wait() {
    // early check isn't required for correctness, but is an important
    // perf win if we can avoid allocating and deallocating a node
    if (tryWait()) {
      return;
    }

    // allocateNode() won't compile unless Handoff has a default
    // constructor
    UniquePtr node = allocateNode();

    auto rv = tryWaitOrPush(*node);
    if (UNLIKELY(rv == WaitResult::SHUTDOWN)) {
      assert(isShutdown());
      throw ShutdownSemError("wait() would block but semaphore is shut down");
    }

    if (rv == WaitResult::PUSH) {
      node->handoff().wait();
      if (UNLIKELY(node->isShutdownNotice())) {
        // this wait() didn't consume a value, it was triggered by shutdown
        assert(isShutdown());
        throw ShutdownSemError(
            "blocking wait() interrupted by semaphore shutdown");
      }

      // node->handoff().wait() can't return until after the node has
      // been popped and post()ed, so it is okay for the UniquePtr to
      // recycle the node now
    }
    // else node wasn't pushed, so it is safe to recycle
  }

  /// Returns a guess at the current value, designed for debugging.
  /// If there are no concurrent posters or waiters then this will
  /// be correct
  uint32_t valueGuess() const {
    // this is actually linearizable, but we don't promise that because
    // we may want to add striping in the future to help under heavy
    // contention
    auto h = head_.load(std::memory_order_acquire);
    return h.isNodeIdx() ? 0 : h.value();
  }

 protected:

  enum class WaitResult {
    PUSH,
    DECR,
    SHUTDOWN,
  };

  /// The type of a std::unique_ptr that will automatically return a
  /// LifoSemNode to the appropriate IndexedMemPool
  typedef std::unique_ptr<LifoSemNode<Handoff, Atom>,
                          LifoSemNodeRecycler<Handoff, Atom>> UniquePtr;

  /// Returns a node that can be passed to decrOrLink
  template <typename... Args>
  UniquePtr allocateNode(Args&&... args) {
    auto idx = LifoSemRawNode<Atom>::pool().allocIndex();
    if (idx != 0) {
      auto& node = idxToNode(idx);
      node.clearShutdownNotice();
      try {
        node.init(std::forward<Args>(args)...);
      } catch (...) {
        LifoSemRawNode<Atom>::pool().recycleIndex(idx);
        throw;
      }
      return UniquePtr(&node);
    } else {
      return UniquePtr();
    }
  }

  /// Returns DECR if the semaphore value was decremented (and waiterNode
  /// was untouched), PUSH if a reference to the wait node was pushed,
  /// or SHUTDOWN if decrement was not possible and push wasn't allowed
  /// because isShutdown(). Ownership of the wait node remains the
  /// responsibility of the caller, who must not release it until after
  /// the node's Handoff has been posted.
  WaitResult tryWaitOrPush(LifoSemNode<Handoff, Atom>& waiterNode) {
    uint32_t n = 1;
    return decrOrPush(n, nodeToIdx(waiterNode));
  }

 private:

  FOLLY_ALIGN_TO_AVOID_FALSE_SHARING
  folly::AtomicStruct<LifoSemHead,Atom> head_;

  char padding_[folly::detail::CacheLocality::kFalseSharingRange -
      sizeof(LifoSemHead)];


  static LifoSemNode<Handoff, Atom>& idxToNode(uint32_t idx) {
    auto raw = &LifoSemRawNode<Atom>::pool()[idx];
    return *static_cast<LifoSemNode<Handoff, Atom>*>(raw);
  }

  static uint32_t nodeToIdx(const LifoSemNode<Handoff, Atom>& node) {
    return LifoSemRawNode<Atom>::pool().locateElem(&node);
  }

  /// Either increments by n and returns 0, or pops a node and returns it.
  /// If n + the stripe's value overflows, then the stripe's value
  /// saturates silently at 2^32-1
  uint32_t incrOrPop(uint32_t n) {
    while (true) {
      assert(n > 0);

      auto head = head_.load(std::memory_order_acquire);
      if (head.isNodeIdx()) {
        auto& node = idxToNode(head.idx());
        if (head_.compare_exchange_strong(head, head.withPop(node.next))) {
          // successful pop
          return head.idx();
        }
      } else {
        auto after = head.withValueIncr(n);
        if (head_.compare_exchange_strong(head, after)) {
          // successful incr
          return 0;
        }
      }
      // retry
    }
  }

  /// Returns DECR if some amount was decremented, with that amount
  /// subtracted from n. If n is 1 and this function returns DECR then n
  /// must be 0 afterward. Returns PUSH if no value could be decremented
  /// and idx was pushed, or if idx was zero and no push was performed but
  /// a push would have been performed with a valid node. Returns SHUTDOWN
  /// if the caller should have blocked but isShutdown(). If idx == 0,
  /// may return PUSH even after isShutdown() or may return SHUTDOWN
  WaitResult decrOrPush(uint32_t& n, uint32_t idx) {
    assert(n > 0);

    while (true) {
      auto head = head_.load(std::memory_order_acquire);

      if (!head.isNodeIdx() && head.value() > 0) {
        // decr
        auto delta = std::min(n, head.value());
        if (head_.compare_exchange_strong(head, head.withValueDecr(delta))) {
          n -= delta;
          return WaitResult::DECR;
        }
      } else {
        // push
        if (idx == 0) {
          return WaitResult::PUSH;
        }

        if (UNLIKELY(head.isShutdown())) {
          return WaitResult::SHUTDOWN;
        }

        auto& node = idxToNode(idx);
        node.next = head.isNodeIdx() ? head.idx() : 0;
        if (head_.compare_exchange_strong(head, head.withPush(idx))) {
          // push succeeded
          return WaitResult::PUSH;
        }
      }
      // retry
    }
  }
};

} // namespace detail

template <template<typename> class Atom, class BatonType>
struct LifoSemImpl : public detail::LifoSemBase<BatonType, Atom> {
  constexpr explicit LifoSemImpl(uint32_t v = 0)
      : detail::LifoSemBase<BatonType, Atom>(v) {}
};

} // namespace folly
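Because post(n) and tryWait(n) fold what would otherwise be n separate operations into single CAS attempts on the head, a consumer can claim several units at once. A hedged usage sketch (sem is a folly::LifoSem as above; processOneItem() is a hypothetical helper):

    uint32_t claimed = sem.tryWait(16); // claim up to 16 units, maybe fewer
    for (uint32_t i = 0; i < claimed; ++i) {
      processOneItem();
    }
    if (claimed == 0) {
      sem.wait();                       // fall back to blocking for one unit
      processOneItem();
    }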
36 ios/Pods/Folly/folly/Likely.h generated
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2011-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -14,22 +14,36 @@
 * limitations under the License.
 */

/**
 * Compiler hints to indicate the fast path of an "if" branch: whether
 * the if condition is likely to be true or false.
 *
 * @author Tudor Bosman (tudorb@fb.com)
 */

#pragma once

#if __GNUC__
#define FOLLY_DETAIL_BUILTIN_EXPECT(b, t) (__builtin_expect(b, t))
#else
#define FOLLY_DETAIL_BUILTIN_EXPECT(b, t) b
#endif

// Likeliness annotations
//
// Useful when the author has better knowledge than the compiler of whether
// the branch condition is overwhelmingly likely to take a specific value.
//
// Useful when the author has better knowledge than the compiler of which code
// paths are designed as the fast path and which are designed as the slow path,
// and to force the compiler to optimize for the fast path, even when it is not
// overwhelmingly likely.

#define FOLLY_LIKELY(x) FOLLY_DETAIL_BUILTIN_EXPECT((x), 1)
#define FOLLY_UNLIKELY(x) FOLLY_DETAIL_BUILTIN_EXPECT((x), 0)

// Un-namespaced annotations

#undef LIKELY
#undef UNLIKELY

#if defined(__GNUC__) && __GNUC__ >= 4
#define LIKELY(x) (__builtin_expect((x), 1))
#if defined(__GNUC__)
#define LIKELY(x) (__builtin_expect((x), 1))
#define UNLIKELY(x) (__builtin_expect((x), 0))
#else
#define LIKELY(x) (x)
#define LIKELY(x) (x)
#define UNLIKELY(x) (x)
#endif
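As a usage illustration (not from the header): marking an error path as unlikely keeps the common case on the fall-through path the compiler optimizes for:

    if (UNLIKELY(buf == nullptr)) {
      throw std::bad_alloc();   // cold path, kept out of the hot code layout
    }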
277 ios/Pods/Folly/folly/LockTraits.h generated
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2016-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -27,6 +27,8 @@
#include <chrono>
#include <type_traits>

#include <folly/functional/Invoke.h>

// Android, OSX, and Cygwin don't have timed mutexes
#if defined(ANDROID) || defined(__ANDROID__) || defined(__APPLE__) || \
    defined(__CYGWIN__)
@ -38,6 +40,13 @@
namespace folly {
namespace detail {

namespace member {
FOLLY_CREATE_MEMBER_INVOKE_TRAITS(lock, lock);
FOLLY_CREATE_MEMBER_INVOKE_TRAITS(try_lock_for, try_lock_for);
FOLLY_CREATE_MEMBER_INVOKE_TRAITS(lock_shared, lock_shared);
FOLLY_CREATE_MEMBER_INVOKE_TRAITS(lock_upgrade, lock_upgrade);
} // namespace member

/**
 * An enum to describe the "level" of a mutex. The supported levels are
 *  Unique - a normal mutex that supports only exclusive locking
@ -71,46 +80,25 @@ struct MutexLevelValueImpl<true, true, true> {
 * mutex. This is used in conjunction with the above MutexLevel
 * specializations and the LockTraitsImpl to determine what functions are
 * supported by objects of type Mutex
 *
 * The implementation uses SFINAE in the return value with trailing return
 * types to figure out what level a mutex is
 */
template <class Mutex>
class LockInterfaceDispatcher {
 private:
  // assert that the mutex type has basic lock and unlock functions
  static_assert(
      std::is_same<decltype(std::declval<Mutex>().lock()), void>::value,
      member::lock::is_invocable<Mutex>::value,
      "The mutex type must support lock and unlock functions");

  // Helper functions for implementing the traits using SFINAE
  template <class T>
  static auto timed_lock_test(T*) -> typename std::is_same<
      decltype(std::declval<T>().try_lock_for(std::chrono::milliseconds(0))),
      bool>::type;
  template <class T>
  static std::false_type timed_lock_test(...);

  template <class T>
  static auto lock_shared_test(T*) -> typename std::
      is_same<decltype(std::declval<T>().lock_shared()), void>::type;
  template <class T>
  static std::false_type lock_shared_test(...);

  template <class T>
  static auto lock_upgrade_test(T*) -> typename std::
      is_same<decltype(std::declval<T>().lock_upgrade()), void>::type;
  template <class T>
  static std::false_type lock_upgrade_test(...);
  using duration = std::chrono::milliseconds;

 public:
  static constexpr bool has_lock_unique = true;
  static constexpr bool has_lock_timed =
      decltype(timed_lock_test<Mutex>(0))::value;
      member::try_lock_for::is_invocable<Mutex, duration>::value;
  static constexpr bool has_lock_shared =
      decltype(lock_shared_test<Mutex>(0))::value;
      member::lock_shared::is_invocable<Mutex>::value;
  static constexpr bool has_lock_upgrade =
      decltype(lock_upgrade_test<Mutex>(0))::value;
      member::lock_upgrade::is_invocable<Mutex>::value;
};

/**
@ -142,6 +130,13 @@ struct LockTraitsImpl<Mutex, MutexLevel::UNIQUE, false> {
  static void unlock(Mutex& mutex) {
    mutex.unlock();
  }

  /**
   * Try to acquire the mutex
   */
  static bool try_lock(Mutex& mutex) {
    return mutex.try_lock();
  }
};

/**
@ -168,6 +163,13 @@ struct LockTraitsImpl<Mutex, MutexLevel::SHARED, false>
  static void unlock_shared(Mutex& mutex) {
    mutex.unlock_shared();
  }

  /**
   * Try to acquire the mutex in shared mode
   */
  static bool try_lock_shared(Mutex& mutex) {
    return mutex.try_lock_shared();
  }
};

/**
@ -175,6 +177,7 @@ struct LockTraitsImpl<Mutex, MutexLevel::SHARED, false>
 *
 * m.lock_upgrade()
 * m.unlock_upgrade()
 * m.try_lock_upgrade()
 *
 * m.unlock_upgrade_and_lock()
 *
@ -218,6 +221,13 @@ struct LockTraitsImpl<Mutex, MutexLevel::UPGRADE, false>
    mutex.unlock_upgrade();
  }

  /**
   * Try and acquire the lock in upgrade mode
   */
  static bool try_lock_upgrade(Mutex& mutex) {
    return mutex.try_lock_upgrade();
  }

  /**
   * Upgrade from an upgradable state to an exclusive state
   */
@ -338,7 +348,49 @@ struct LockTraitsImpl<Mutex, MutexLevel::UPGRADE, true>
  }
};

} // detail
/**
 * Unlock helpers
 *
 * These help in determining whether it is safe for Synchronized::LockedPtr
 * instances to be move assigned from one another. It is safe if they both
 * have the same unlock policy, and it is not if they don't have the same
 * unlock policy. For example
 *
 *  auto wlock = synchronized.wlock();
 *  wlock.unlock();
 *
 *  wlock = synchronized.rlock();
 *
 * This code would try to release the shared lock with a call to unlock(),
 * resulting in possibly undefined behavior. By allowing the LockPolicy
 * classes (defined below) to know what their unlocking behavior is, we can
 * prevent against this by disabling unsafe conversions to and from
 * incompatible LockedPtr types (they are incompatible if the underlying
 * LockPolicy has different unlock policies).
 */
template <template <typename...> class LockTraits>
struct UnlockPolicyExclusive {
  template <typename Mutex>
  static void unlock(Mutex& mutex) {
    LockTraits<Mutex>::unlock(mutex);
  }
};
template <template <typename...> class LockTraits>
struct UnlockPolicyShared {
  template <typename Mutex>
  static void unlock(Mutex& mutex) {
    LockTraits<Mutex>::unlock_shared(mutex);
  }
};
template <template <typename...> class LockTraits>
struct UnlockPolicyUpgrade {
  template <typename Mutex>
  static void unlock(Mutex& mutex) {
    LockTraits<Mutex>::unlock_upgrade(mutex);
  }
};

} // namespace detail

/**
 * LockTraits describes details about a particular mutex type.
@ -362,15 +414,18 @@ struct LockTraitsImpl<Mutex, MutexLevel::UPGRADE, true>
 * The following static methods always exist:
 * - lock(Mutex& mutex)
 * - unlock(Mutex& mutex)
 * - try_lock(Mutex& mutex)
 *
 * The following static methods may exist, depending on is_shared, is_timed
 * and is_upgrade:
 * - lock_shared()
 * - try_lock_shared()
 *
 * - try_lock_for()
 * - try_lock_shared_for()
 *
 * - lock_upgrade()
 * - try_lock_upgrade()
 * - unlock_upgrade_and_lock()
 * - unlock_and_lock_upgrade()
 * - unlock_and_lock_shared()
@ -401,71 +456,22 @@ struct LockTraitsBase
template <class Mutex>
struct LockTraits : public LockTraitsBase<Mutex> {};
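Taken together, these traits let generic code lock any conforming mutex without naming its concrete type. A minimal sketch of the idea (withExclusive is a hypothetical helper, and the body ignores exception safety for brevity):

    template <class Mutex, class Fn>
    void withExclusive(Mutex& m, Fn fn) {
      folly::LockTraits<Mutex>::lock(m);
      fn();                                  // runs while holding the lock
      folly::LockTraits<Mutex>::unlock(m);
    }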

/**
 * If the lock is a shared lock, acquire it in shared mode.
 * Otherwise, for plain (exclusive-only) locks, perform a normal acquire.
 */
template <class Mutex>
typename std::enable_if<LockTraits<Mutex>::is_shared>::type
lock_shared_or_unique(Mutex& mutex) {
  LockTraits<Mutex>::lock_shared(mutex);
}
template <class Mutex>
typename std::enable_if<!LockTraits<Mutex>::is_shared>::type
lock_shared_or_unique(Mutex& mutex) {
  LockTraits<Mutex>::lock(mutex);
}

/**
 * If the lock is a shared lock, try to acquire it in shared mode, for up to
 * the given timeout. Otherwise, for plain (exclusive-only) locks, try to
 * perform a normal acquire.
 *
 * Returns true if the lock was acquired, or false on time out.
 */
template <class Mutex, class Rep, class Period>
typename std::enable_if<LockTraits<Mutex>::is_shared, bool>::type
try_lock_shared_or_unique_for(
    Mutex& mutex,
    const std::chrono::duration<Rep, Period>& timeout) {
  return LockTraits<Mutex>::try_lock_shared_for(mutex, timeout);
}
template <class Mutex, class Rep, class Period>
typename std::enable_if<!LockTraits<Mutex>::is_shared, bool>::type
try_lock_shared_or_unique_for(
    Mutex& mutex,
    const std::chrono::duration<Rep, Period>& timeout) {
  return LockTraits<Mutex>::try_lock_for(mutex, timeout);
}

/**
 * Release a lock acquired with lock_shared_or_unique()
 */
template <class Mutex>
typename std::enable_if<LockTraits<Mutex>::is_shared>::type
unlock_shared_or_unique(Mutex& mutex) {
  LockTraits<Mutex>::unlock_shared(mutex);
}
template <class Mutex>
typename std::enable_if<!LockTraits<Mutex>::is_shared>::type
unlock_shared_or_unique(Mutex& mutex) {
  LockTraits<Mutex>::unlock(mutex);
}

/*
 * Lock policy classes.
 *
 * These can be used as template parameters to provide compile-time
 * selection over the type of lock operation to perform.
 */

/**
 * A lock policy that performs exclusive lock operations.
 */
struct LockPolicyExclusive {
struct LockPolicyExclusive : detail::UnlockPolicyExclusive<LockTraits> {
  using UnlockPolicy = detail::UnlockPolicyExclusive<LockTraits>;

  template <class Mutex>
  static void lock(Mutex& mutex) {
  static std::true_type lock(Mutex& mutex) {
    LockTraits<Mutex>::lock(mutex);
    return std::true_type{};
  }
  template <class Mutex, class Rep, class Period>
  static bool try_lock_for(
@ -473,20 +479,19 @@ struct LockPolicyExclusive {
      const std::chrono::duration<Rep, Period>& timeout) {
    return LockTraits<Mutex>::try_lock_for(mutex, timeout);
  }
  template <class Mutex>
  static void unlock(Mutex& mutex) {
    LockTraits<Mutex>::unlock(mutex);
  }
};

/**
 * A lock policy that performs shared lock operations.
 * This policy only works with shared mutex types.
 */
struct LockPolicyShared {
struct LockPolicyShared : detail::UnlockPolicyShared<LockTraits> {
  using UnlockPolicy = detail::UnlockPolicyShared<LockTraits>;

  template <class Mutex>
  static void lock(Mutex& mutex) {
  static std::true_type lock(Mutex& mutex) {
    LockTraits<Mutex>::lock_shared(mutex);
    return std::true_type{};
  }
  template <class Mutex, class Rep, class Period>
  static bool try_lock_for(
@ -494,31 +499,6 @@ struct LockPolicyShared {
      const std::chrono::duration<Rep, Period>& timeout) {
    return LockTraits<Mutex>::try_lock_shared_for(mutex, timeout);
  }
  template <class Mutex>
  static void unlock(Mutex& mutex) {
    LockTraits<Mutex>::unlock_shared(mutex);
  }
};

/**
 * A lock policy that performs a shared lock operation if a shared mutex type
 * is given, or a normal exclusive lock operation on non-shared mutex types.
 */
struct LockPolicyShareable {
  template <class Mutex>
  static void lock(Mutex& mutex) {
    lock_shared_or_unique(mutex);
  }
  template <class Mutex, class Rep, class Period>
  static bool try_lock_for(
      Mutex& mutex,
      const std::chrono::duration<Rep, Period>& timeout) {
    return try_lock_shared_or_unique_for(mutex, timeout);
  }
  template <class Mutex>
  static void unlock(Mutex& mutex) {
    unlock_shared_or_unique(mutex);
  }
};

/**
@ -528,10 +508,13 @@ struct LockPolicyShareable {
 *   unlock() -> unlock_upgrade()
 *   try_lock_for -> try_lock_upgrade_for()
 */
struct LockPolicyUpgrade {
struct LockPolicyUpgrade : detail::UnlockPolicyUpgrade<LockTraits> {
  using UnlockPolicy = detail::UnlockPolicyUpgrade<LockTraits>;

  template <class Mutex>
  static void lock(Mutex& mutex) {
  static std::true_type lock(Mutex& mutex) {
    LockTraits<Mutex>::lock_upgrade(mutex);
    return std::true_type{};
  }
  template <class Mutex, class Rep, class Period>
  static bool try_lock_for(
@ -539,9 +522,47 @@ struct LockPolicyUpgrade {
      const std::chrono::duration<Rep, Period>& timeout) {
    return LockTraits<Mutex>::try_lock_upgrade_for(mutex, timeout);
  }
};

/*****************************************************************************
 * Policies for optional mutex locking
 ****************************************************************************/
/**
 * A lock policy that tries to acquire write locks and returns true or false
 * based on whether the lock operation succeeds
 */
struct LockPolicyTryExclusive : detail::UnlockPolicyExclusive<LockTraits> {
  using UnlockPolicy = detail::UnlockPolicyExclusive<LockTraits>;

  template <class Mutex>
  static void unlock(Mutex& mutex) {
    LockTraits<Mutex>::unlock_upgrade(mutex);
  static bool lock(Mutex& mutex) {
    return LockTraits<Mutex>::try_lock(mutex);
  }
};

/**
 * A lock policy that tries to acquire a read lock and returns true or false
 * based on whether the lock operation succeeds
 */
struct LockPolicyTryShared : detail::UnlockPolicyShared<LockTraits> {
  using UnlockPolicy = detail::UnlockPolicyShared<LockTraits>;

  template <class Mutex>
  static bool lock(Mutex& mutex) {
    return LockTraits<Mutex>::try_lock_shared(mutex);
  }
};

/**
 * A lock policy that tries to acquire an upgrade lock and returns true or
 * false based on whether the lock operation succeeds
 */
struct LockPolicyTryUpgrade : detail::UnlockPolicyUpgrade<LockTraits> {
  using UnlockPolicy = detail::UnlockPolicyUpgrade<LockTraits>;

  template <class Mutex>
  static bool lock(Mutex& mutex) {
    return LockTraits<Mutex>::try_lock_upgrade(mutex);
  }
};

@ -555,10 +576,11 @@ struct LockPolicyUpgrade {
 *   unlock() -> unlock()
 *   try_lock_for -> try_unlock_upgrade_and_lock_for()
 */
struct LockPolicyFromUpgradeToExclusive : public LockPolicyExclusive {
struct LockPolicyFromUpgradeToExclusive : LockPolicyExclusive {
  template <class Mutex>
  static void lock(Mutex& mutex) {
  static std::true_type lock(Mutex& mutex) {
    LockTraits<Mutex>::unlock_upgrade_and_lock(mutex);
    return std::true_type{};
  }
  template <class Mutex, class Rep, class Period>
  static bool try_lock_for(
@ -575,10 +597,11 @@ struct LockPolicyFromUpgradeToExclusive : public LockPolicyExclusive {
 *   unlock() -> unlock_upgrade()
 *   try_lock_for -> unlock_and_lock_upgrade()
 */
struct LockPolicyFromExclusiveToUpgrade : public LockPolicyUpgrade {
struct LockPolicyFromExclusiveToUpgrade : LockPolicyUpgrade {
  template <class Mutex>
  static void lock(Mutex& mutex) {
  static std::true_type lock(Mutex& mutex) {
    LockTraits<Mutex>::unlock_and_lock_upgrade(mutex);
    return std::true_type{};
  }
  template <class Mutex, class Rep, class Period>
  static bool try_lock_for(
@ -598,10 +621,11 @@ struct LockPolicyFromExclusiveToUpgrade : public LockPolicyUpgrade {
 *   unlock() -> unlock_shared()
 *   try_lock_for -> unlock_upgrade_and_lock_shared()
 */
struct LockPolicyFromUpgradeToShared : public LockPolicyShared {
struct LockPolicyFromUpgradeToShared : LockPolicyShared {
  template <class Mutex>
  static void lock(Mutex& mutex) {
  static std::true_type lock(Mutex& mutex) {
    LockTraits<Mutex>::unlock_upgrade_and_lock_shared(mutex);
    return std::true_type{};
  }
  template <class Mutex, class Rep, class Period>
  static bool try_lock_for(
@ -621,10 +645,11 @@ struct LockPolicyFromUpgradeToShared : public LockPolicyShared {
 *   unlock() -> unlock_shared()
 *   try_lock_for() -> unlock_and_lock_shared()
 */
struct LockPolicyFromExclusiveToShared : public LockPolicyShared {
struct LockPolicyFromExclusiveToShared : LockPolicyShared {
  template <class Mutex>
  static void lock(Mutex& mutex) {
  static std::true_type lock(Mutex& mutex) {
    LockTraits<Mutex>::unlock_and_lock_shared(mutex);
    return std::true_type{};
  }
  template <class Mutex, class Rep, class Period>
  static bool try_lock_for(
@ -637,4 +662,4 @@ struct LockPolicyFromExclusiveToShared : public LockPolicyShared {
  }
};

} // folly
} // namespace folly
7 ios/Pods/Folly/folly/LockTraitsBoost.h generated
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2016-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -27,7 +27,6 @@

#if FOLLY_LOCK_TRAITS_HAVE_TIMED_MUTEXES


namespace folly {

namespace detail {
@ -37,7 +36,7 @@ boost::chrono::duration<Rep, boost::ratio<Num, Denom>> toBoostDuration(
    const std::chrono::duration<Rep, std::ratio<Num, Denom>>& d) {
  return boost::chrono::duration<Rep, boost::ratio<Num, Denom>>(d.count());
}
}
} // namespace detail

/**
 * LockTraits specialization for boost::shared_mutex
@ -96,6 +95,6 @@ struct LockTraits<boost::recursive_timed_mutex>
    return mutex.try_lock_for(detail::toBoostDuration(timeout));
  }
};
} // folly
} // namespace folly

#endif // FOLLY_LOCK_TRAITS_HAVE_TIMED_MUTEXES
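The toBoostDuration helper above only re-expresses a std::chrono duration in the equivalent boost::chrono type so boost's timed-lock calls can accept it; for example (illustrative):

    auto d = folly::detail::toBoostDuration(std::chrono::milliseconds(50));
    // d is a boost::chrono duration with the same Rep and ratio; d.count() == 50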
55 ios/Pods/Folly/folly/Logging.h generated
@ -1,55 +0,0 @@
/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <atomic>
#include <chrono>
#include <glog/logging.h>

#ifndef FB_LOG_EVERY_MS
/**
 * Issues a LOG(severity) no more often than every
 * milli_interval milliseconds. Example:
 *
 *   FB_LOG_EVERY_MS(INFO, 10000) << "At least ten seconds passed"
 *     " since you last saw this.";
 *
 * The implementation uses for statements to introduce variables in
 * a nice way that doesn't mess surrounding statements. It is thread
 * safe. Non-positive intervals will always log.
 */
#define FB_LOG_EVERY_MS(severity, milli_interval)                             \
  for (decltype(milli_interval) FB_LEM_once = 1,                              \
                                FB_LEM_interval = (milli_interval);           \
       FB_LEM_once; )                                                         \
    for (::std::chrono::milliseconds::rep FB_LEM_prev, FB_LEM_now =           \
             FB_LEM_interval <= 0 ? 0 :                                       \
             ::std::chrono::duration_cast< ::std::chrono::milliseconds>(      \
                 ::std::chrono::system_clock::now().time_since_epoch()        \
                 ).count();                                                   \
         FB_LEM_once; )                                                       \
      for (static ::std::atomic< ::std::chrono::milliseconds::rep>            \
               FB_LEM_hist; FB_LEM_once; FB_LEM_once = 0)                     \
        if (FB_LEM_interval > 0 &&                                            \
            (FB_LEM_now - (FB_LEM_prev =                                      \
                               FB_LEM_hist.load(std::memory_order_acquire)) < \
                 FB_LEM_interval ||                                           \
             !FB_LEM_hist.compare_exchange_strong(FB_LEM_prev, FB_LEM_now))) {\
        } else                                                                \
          LOG(severity)

#endif
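The macro's core trick is a lock-free rate limit: log only when enough time has passed and this thread wins the compare-exchange on a shared timestamp. A simplified sketch of that idea (assumes glog; intervalMs is a hypothetical variable, and this is not the literal macro expansion, which also handles non-positive intervals):

    static std::atomic<std::chrono::milliseconds::rep> hist{0};
    auto now = std::chrono::duration_cast<std::chrono::milliseconds>(
                   std::chrono::system_clock::now().time_since_epoch())
                   .count();
    auto prev = hist.load(std::memory_order_acquire);
    if (now - prev >= intervalMs &&                 // enough time elapsed, and
        hist.compare_exchange_strong(prev, now)) {  // this thread won the slot
      LOG(INFO) << "rate-limited message";
    }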
125 ios/Pods/Folly/folly/MPMCPipeline.h generated
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2013-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -20,6 +20,7 @@

#include <glog/logging.h>

#include <folly/Portability.h>
#include <folly/detail/MPMCPipelineDetail.h>

namespace folly {
@ -27,7 +28,8 @@ namespace folly {
/**
 * Helper tag template to use amplification > 1
 */
template <class T, size_t Amp> class MPMCPipelineStage;
template <class T, size_t Amp>
class MPMCPipelineStage;

/**
 * Multi-Producer, Multi-Consumer pipeline.
@ -91,15 +93,46 @@ template <class T, size_t Amp> class MPMCPipelineStage;
 * all slots are filled (and therefore the queue doesn't freeze) because
 * we require that each step produces exactly K outputs for every input.
 */
template <class In, class... Stages> class MPMCPipeline {
template <class In, class... Stages>
class MPMCPipeline {
  typedef std::tuple<detail::PipelineStageInfo<Stages>...> StageInfos;
  typedef std::tuple<
      detail::MPMCPipelineStageImpl<In>,
      detail::MPMCPipelineStageImpl<
          typename detail::PipelineStageInfo<Stages>::value_type>...>
      StageTuple;
      detail::MPMCPipelineStageImpl<In>,
      detail::MPMCPipelineStageImpl<
          typename detail::PipelineStageInfo<Stages>::value_type>...>
      StageTuple;
  static constexpr size_t kAmplification =
      detail::AmplificationProduct<StageInfos>::value;
      detail::AmplificationProduct<StageInfos>::value;

  class TicketBaseDebug {
   public:
    TicketBaseDebug() noexcept : owner_(nullptr), value_(0xdeadbeeffaceb00c) {}
    TicketBaseDebug(TicketBaseDebug&& other) noexcept
        : owner_(std::exchange(other.owner_, nullptr)),
          value_(std::exchange(other.value_, 0xdeadbeeffaceb00c)) {}
    explicit TicketBaseDebug(MPMCPipeline* owner, uint64_t value) noexcept
        : owner_(owner), value_(value) {}
    void check_owner(MPMCPipeline* owner) const {
      CHECK(owner == owner_);
    }

    MPMCPipeline* owner_;
    uint64_t value_;
  };

  class TicketBaseNDebug {
   public:
    TicketBaseNDebug() = default;
    TicketBaseNDebug(TicketBaseNDebug&&) = default;
    explicit TicketBaseNDebug(MPMCPipeline*, uint64_t value) noexcept
        : value_(value) {}
    void check_owner(MPMCPipeline*) const {}

    uint64_t value_;
  };

  using TicketBase =
      std::conditional_t<kIsDebug, TicketBaseDebug, TicketBaseNDebug>;

 public:
  /**
@ -107,35 +140,17 @@ template <class In, class... Stages> class MPMCPipeline {
   * blockingWriteStage. Tickets are not thread-safe.
   */
  template <size_t Stage>
  class Ticket {
  class Ticket : TicketBase {
   public:
    ~Ticket() noexcept {
      CHECK_EQ(remainingUses_, 0) << "All tickets must be completely used!";
    }

#ifndef NDEBUG
    Ticket() noexcept
        : owner_(nullptr),
          remainingUses_(0),
          value_(0xdeadbeeffaceb00c) {
    }
#else
    Ticket() noexcept : remainingUses_(0) { }
#endif
    Ticket() noexcept : remainingUses_(0) {}

    Ticket(Ticket&& other) noexcept
        :
#ifndef NDEBUG
          owner_(other.owner_),
#endif
          remainingUses_(other.remainingUses_),
          value_(other.value_) {
      other.remainingUses_ = 0;
#ifndef NDEBUG
      other.owner_ = nullptr;
      other.value_ = 0xdeadbeeffaceb00c;
#endif
    }
        : TicketBase(static_cast<TicketBase&&>(other)),
          remainingUses_(std::exchange(other.remainingUses_, 0)) {}

    Ticket& operator=(Ticket&& other) noexcept {
      if (this != &other) {
@ -147,31 +162,16 @@ template <class In, class... Stages> class MPMCPipeline {

   private:
    friend class MPMCPipeline;
#ifndef NDEBUG
    MPMCPipeline* owner_;
#endif
    size_t remainingUses_;
    uint64_t value_;


    Ticket(MPMCPipeline* owner, size_t amplification, uint64_t value) noexcept
        :
#ifndef NDEBUG
          owner_(owner),
#endif
          remainingUses_(amplification),
          value_(value * amplification) {
      (void)owner; // -Wunused-parameter
    }
        : TicketBase(owner, value * amplification),
          remainingUses_(amplification) {}

    uint64_t use(MPMCPipeline* owner) {
      CHECK_GT(remainingUses_--, 0);
#ifndef NDEBUG
      CHECK(owner == owner_);
#else
      (void)owner; // -Wunused-parameter
#endif
      return value_++;
      TicketBase::check_owner(owner);
      return TicketBase::value_++;
    }
  };

@ -185,7 +185,7 @@ template <class In, class... Stages> class MPMCPipeline {
   * Construct a pipeline with N+1 queue sizes.
   */
  template <class... Sizes>
  explicit MPMCPipeline(Sizes... sizes) : stages_(sizes...) { }
  explicit MPMCPipeline(Sizes... sizes) : stages_(sizes...) {}

  /**
   * Push an element into (the first stage of) the pipeline. Blocking.
@ -241,18 +241,15 @@ template <class In, class... Stages> class MPMCPipeline {
   */
  template <size_t Stage, class... Args>
  void blockingWriteStage(Ticket<Stage>& ticket, Args&&... args) {
    std::get<Stage+1>(stages_).blockingWriteWithTicket(
        ticket.use(this),
        std::forward<Args>(args)...);
    std::get<Stage + 1>(stages_).blockingWriteWithTicket(
        ticket.use(this), std::forward<Args>(args)...);
  }

  /**
   * Pop an element from (the final stage of) the pipeline. Blocking.
   */
  void blockingRead(
      typename std::tuple_element<
          sizeof...(Stages),
          StageTuple>::type::value_type& elem) {
  void blockingRead(typename std::tuple_element<sizeof...(Stages), StageTuple>::
                        type::value_type& elem) {
    std::get<sizeof...(Stages)>(stages_).blockingRead(elem);
  }

@ -260,10 +257,8 @@ template <class In, class... Stages> class MPMCPipeline {
   * Try to pop an element from (the final stage of) the pipeline.
   * Non-blocking.
   */
  bool read(
      typename std::tuple_element<
          sizeof...(Stages),
          StageTuple>::type::value_type& elem) {
  bool read(typename std::tuple_element<sizeof...(Stages), StageTuple>::type::
                value_type& elem) {
    return std::get<sizeof...(Stages)>(stages_).read(elem);
  }

@ -275,13 +270,13 @@ template <class In, class... Stages> class MPMCPipeline {
   * in any queue) are also counted.
   */
  ssize_t sizeGuess() const noexcept {
    return (std::get<0>(stages_).writeCount() * kAmplification -
            std::get<sizeof...(Stages)>(stages_).readCount());
    return ssize_t(
        std::get<0>(stages_).writeCount() * kAmplification -
        std::get<sizeof...(Stages)>(stages_).readCount());
  }

 private:
  StageTuple stages_;
};


} // namespaces
} // namespace folly
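A hedged end-to-end sketch (editor's illustration; it relies on blockingReadStage from an unchanged part of this header, taken on faith here): a two-stage pipeline where every int fans out into two strings, so the amplification is 2 and each ticket must be used exactly twice:

    folly::MPMCPipeline<int, folly::MPMCPipelineStage<std::string, 2>>
        pipeline(10, 20);                 // capacities of the two queues

    pipeline.blockingWrite(7);            // producer thread

    int x;                                // stage-0 worker thread
    auto ticket = pipeline.blockingReadStage<0>(x);
    pipeline.blockingWriteStage<0>(ticket, std::to_string(x));
    pipeline.blockingWriteStage<0>(ticket, std::to_string(x + 1));

    std::string out;                      // consumer thread
    pipeline.blockingRead(out);           // receives "7" or "8"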
778 ios/Pods/Folly/folly/MPMCQueue.h generated
File diff suppressed because it is too large
23 ios/Pods/Folly/folly/MacAddress.h generated
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2014-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -18,8 +18,8 @@

#include <iosfwd>

#include <folly/Bits.h>
#include <folly/Range.h>
#include <folly/lang/Bits.h>

namespace folly {

@ -222,11 +222,24 @@ class MacAddress {

/* Define toAppend() so to<string> will work */
template <class Tgt>
typename std::enable_if<IsSomeString<Tgt>::value>::type
toAppend(MacAddress address, Tgt* result) {
typename std::enable_if<IsSomeString<Tgt>::value>::type toAppend(
    MacAddress address,
    Tgt* result) {
  toAppend(address.toString(), result);
}

std::ostream& operator<<(std::ostream& os, MacAddress address);

} // folly
} // namespace folly

namespace std {

// Provide an implementation for std::hash<MacAddress>
template <>
struct hash<folly::MacAddress> {
  size_t operator()(const folly::MacAddress& address) const {
    return std::hash<uint64_t>()(address.u64HBO());
  }
};

} // namespace std
224 ios/Pods/Folly/folly/MapUtil.h generated
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2012-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -18,6 +18,8 @@

#include <folly/Conv.h>
#include <folly/Optional.h>
#include <folly/functional/Invoke.h>
#include <tuple>

namespace folly {

@ -25,13 +27,21 @@ namespace folly {
 * Given a map and a key, return the value corresponding to the key in the map,
 * or a given default value if the key doesn't exist in the map.
 */
template <class Map>
typename Map::mapped_type get_default(
    const Map& map, const typename Map::key_type& key,
    const typename Map::mapped_type& dflt =
        typename Map::mapped_type()) {
template <typename Map, typename Key>
typename Map::mapped_type get_default(const Map& map, const Key& key) {
  auto pos = map.find(key);
  return (pos != map.end() ? pos->second : dflt);
  return (pos != map.end()) ? (pos->second) : (typename Map::mapped_type{});
}
template <
    class Map,
    typename Key = typename Map::key_type,
    typename Value = typename Map::mapped_type,
    typename std::enable_if<!is_invocable<Value>::value>::type* = nullptr>
typename Map::mapped_type
get_default(const Map& map, const Key& key, Value&& dflt) {
  using M = typename Map::mapped_type;
  auto pos = map.find(key);
  return (pos != map.end()) ? (pos->second) : M(std::forward<Value>(dflt));
}
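Usage of the two overloads above (illustrative):

    std::map<std::string, int> m{{"a", 1}};
    int x = folly::get_default(m, "a");     // 1
    int y = folly::get_default(m, "b", 42); // 42: key absent, default used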
|
||||
/**
|
||||
@ -40,12 +50,12 @@ typename Map::mapped_type get_default(
|
||||
*/
|
||||
template <
|
||||
class Map,
|
||||
typename Key = typename Map::key_type,
|
||||
typename Func,
|
||||
typename = typename std::enable_if<std::is_convertible<
|
||||
typename std::result_of<Func()>::type,
|
||||
typename Map::mapped_type>::value>::type>
|
||||
typename = typename std::enable_if<
|
||||
is_invocable_r<typename Map::mapped_type, Func>::value>::type>
|
||||
typename Map::mapped_type
|
||||
get_default(const Map& map, const typename Map::key_type& key, Func&& dflt) {
|
||||
get_default(const Map& map, const Key& key, Func&& dflt) {
|
||||
auto pos = map.find(key);
|
||||
return pos != map.end() ? pos->second : dflt();
|
||||
}
|
||||
@ -54,10 +64,13 @@ get_default(const Map& map, const typename Map::key_type& key, Func&& dflt) {
|
||||
* Given a map and a key, return the value corresponding to the key in the map,
|
||||
* or throw an exception of the specified type.
|
||||
*/
|
||||
template <class E = std::out_of_range, class Map>
|
||||
template <
|
||||
class E = std::out_of_range,
|
||||
class Map,
|
||||
typename Key = typename Map::key_type>
|
||||
const typename Map::mapped_type& get_or_throw(
|
||||
const Map& map,
|
||||
const typename Map::key_type& key,
|
||||
const Key& key,
|
||||
const std::string& exceptionStrPrefix = std::string()) {
|
||||
auto pos = map.find(key);
|
||||
if (pos != map.end()) {
|
||||
@ -66,10 +79,13 @@ const typename Map::mapped_type& get_or_throw(
|
||||
throw E(folly::to<std::string>(exceptionStrPrefix, key));
|
||||
}
|
||||
|
||||
template <class E = std::out_of_range, class Map>
|
||||
template <
|
||||
class E = std::out_of_range,
|
||||
class Map,
|
||||
typename Key = typename Map::key_type>
|
||||
typename Map::mapped_type& get_or_throw(
|
||||
Map& map,
|
||||
const typename Map::key_type& key,
|
||||
const Key& key,
|
||||
const std::string& exceptionStrPrefix = std::string()) {
|
||||
auto pos = map.find(key);
|
||||
if (pos != map.end()) {
|
||||
@ -82,9 +98,10 @@ typename Map::mapped_type& get_or_throw(
|
||||
* Given a map and a key, return a Optional<V> if the key exists and None if the
|
||||
* key does not exist in the map.
|
||||
*/
|
||||
template <class Map>
|
||||
template <class Map, typename Key = typename Map::key_type>
|
||||
folly::Optional<typename Map::mapped_type> get_optional(
|
||||
const Map& map, const typename Map::key_type& key) {
|
||||
const Map& map,
|
||||
const Key& key) {
|
||||
auto pos = map.find(key);
|
||||
if (pos != map.end()) {
|
||||
return folly::Optional<typename Map::mapped_type>(pos->second);
|
||||
@ -98,14 +115,33 @@ folly::Optional<typename Map::mapped_type> get_optional(
|
||||
* key in the map, or the given default reference if the key doesn't exist in
|
||||
* the map.
|
||||
*/
|
||||
template <class Map>
|
||||
template <class Map, typename Key = typename Map::key_type>
|
||||
const typename Map::mapped_type& get_ref_default(
|
||||
const Map& map, const typename Map::key_type& key,
|
||||
const Map& map,
|
||||
const Key& key,
|
||||
const typename Map::mapped_type& dflt) {
|
||||
auto pos = map.find(key);
|
||||
return (pos != map.end() ? pos->second : dflt);
|
||||
}
|
||||
|
||||
/**
|
||||
* Passing a temporary default value returns a dangling reference when it is
|
||||
* returned. Lifetime extension is broken by the indirection.
|
||||
* The caller must ensure that the default value outlives the reference returned
|
||||
* by get_ref_default().
|
||||
*/
|
||||
template <class Map, typename Key = typename Map::key_type>
|
||||
const typename Map::mapped_type& get_ref_default(
|
||||
const Map& map,
|
||||
const Key& key,
|
||||
typename Map::mapped_type&& dflt) = delete;
|
||||
|
||||
template <class Map, typename Key = typename Map::key_type>
|
||||
const typename Map::mapped_type& get_ref_default(
|
||||
const Map& map,
|
||||
const Key& key,
|
||||
const typename Map::mapped_type&& dflt) = delete;
|
||||
|
||||
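
The two deleted overloads above are what turn the dangling-reference hazard described in the comment into a compile-time error: a temporary can no longer bind as the default. A minimal sketch of the distinction, assuming this hunk is folly/MapUtil.h (the map and the std::string default are illustrative):

#include <map>
#include <string>

#include <folly/MapUtil.h>

void ref_default_example(const std::map<int, std::string>& m) {
  const std::string fallback = "none";
  // OK: the default is an lvalue that outlives the returned reference.
  const std::string& v = folly::get_ref_default(m, 1, fallback);
  (void)v;
  // Rejected after this change: the default would be a temporary, so the
  // returned reference would dangle once the full expression ends.
  // const std::string& bad = folly::get_ref_default(m, 1, std::string("x"));
}
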
/**
 * Given a map and a key, return a reference to the value corresponding to the
 * key in the map, or the given default reference if the key doesn't exist in
@ -113,16 +149,14 @@ const typename Map::mapped_type& get_ref_default(
 */
template <
    class Map,
    typename Key = typename Map::key_type,
    typename Func,
    typename = typename std::enable_if<std::is_convertible<
        typename std::result_of<Func()>::type,
        const typename Map::mapped_type&>::value>::type,
    typename = typename std::enable_if<
        std::is_reference<typename std::result_of<Func()>::type>::value>::type>
const typename Map::mapped_type& get_ref_default(
    const Map& map,
    const typename Map::key_type& key,
    Func&& dflt) {
        is_invocable_r<const typename Map::mapped_type&, Func>::value>::type,
    typename = typename std::enable_if<
        std::is_reference<invoke_result_t<Func>>::value>::type>
const typename Map::mapped_type&
get_ref_default(const Map& map, const Key& key, Func&& dflt) {
  auto pos = map.find(key);
  return (pos != map.end() ? pos->second : dflt());
}
@ -131,9 +165,8 @@ const typename Map::mapped_type& get_ref_default(
 * Given a map and a key, return a pointer to the value corresponding to the
 * key in the map, or nullptr if the key doesn't exist in the map.
 */
template <class Map>
const typename Map::mapped_type* get_ptr(
    const Map& map, const typename Map::key_type& key) {
template <class Map, typename Key = typename Map::key_type>
const typename Map::mapped_type* get_ptr(const Map& map, const Key& key) {
  auto pos = map.find(key);
  return (pos != map.end() ? &pos->second : nullptr);
}
@ -141,11 +174,134 @@ const typename Map::mapped_type* get_ptr(
/**
 * Non-const overload of the above.
 */
template <class Map>
typename Map::mapped_type* get_ptr(
    Map& map, const typename Map::key_type& key) {
template <class Map, typename Key = typename Map::key_type>
typename Map::mapped_type* get_ptr(Map& map, const Key& key) {
  auto pos = map.find(key);
  return (pos != map.end() ? &pos->second : nullptr);
}

} // namespace folly
// TODO: Remove the return type computations when clang 3.5 and gcc 5.1 are
// the minimum supported versions.
namespace detail {
template <
    class T,
    size_t pathLength,
    class = typename std::enable_if<(pathLength > 0)>::type>
struct NestedMapType {
  using type = typename NestedMapType<T, pathLength - 1>::type::mapped_type;
};

template <class T>
struct NestedMapType<T, 1> {
  using type = typename T::mapped_type;
};

template <typename... KeysDefault>
struct DefaultType;

template <typename Default>
struct DefaultType<Default> {
  using type = Default;
};

template <typename Key, typename... KeysDefault>
struct DefaultType<Key, KeysDefault...> {
  using type = typename DefaultType<KeysDefault...>::type;
};

template <class... KeysDefault>
auto extract_default(const KeysDefault&... keysDefault) ->
    typename DefaultType<KeysDefault...>::type const& {
  return std::get<sizeof...(KeysDefault) - 1>(std::tie(keysDefault...));
}
} // namespace detail

/**
 * Given a map of maps and a path of keys, return an Optional<V> if the nested
 * key exists and None if the nested keys do not exist in the map.
 */
template <class Map, class Key1, class Key2, class... Keys>
auto get_optional(
    const Map& map,
    const Key1& key1,
    const Key2& key2,
    const Keys&... keys)
    -> folly::Optional<
        typename detail::NestedMapType<Map, 2 + sizeof...(Keys)>::type> {
  auto pos = map.find(key1);
  return pos != map.end() ? get_optional(pos->second, key2, keys...)
                          : folly::none;
}

/**
 * Given a map of maps and a path of keys, return a pointer to the nested value,
 * or nullptr if the key doesn't exist in the map.
 */
template <class Map, class Key1, class Key2, class... Keys>
auto get_ptr(
    const Map& map,
    const Key1& key1,
    const Key2& key2,
    const Keys&... keys) ->
    typename detail::NestedMapType<Map, 2 + sizeof...(Keys)>::type const* {
  auto pos = map.find(key1);
  return pos != map.end() ? get_ptr(pos->second, key2, keys...) : nullptr;
}

template <class Map, class Key1, class Key2, class... Keys>
auto get_ptr(Map& map, const Key1& key1, const Key2& key2, const Keys&... keys)
    -> typename detail::NestedMapType<Map, 2 + sizeof...(Keys)>::type* {
  auto pos = map.find(key1);
  return pos != map.end() ? get_ptr(pos->second, key2, keys...) : nullptr;
}

/**
 * Given a map and a path of keys, return the value corresponding to the nested
 * value, or a given default value if the path doesn't exist in the map.
 * The default value is the last parameter, and is copied when returned.
 */
template <
    class Map,
    class Key1,
    class Key2,
    class... KeysDefault,
    typename = typename std::enable_if<sizeof...(KeysDefault) != 0>::type>
auto get_default(
    const Map& map,
    const Key1& key1,
    const Key2& key2,
    const KeysDefault&... keysDefault) ->
    typename detail::NestedMapType<Map, 1 + sizeof...(KeysDefault)>::type {
  if (const auto* ptr = get_ptr(map, key1)) {
    return get_default(*ptr, key2, keysDefault...);
  }
  return detail::extract_default(keysDefault...);
}

/**
 * Given a map and a path of keys, return a reference to the value corresponding
 * to the nested value, or the given default reference if the path doesn't exist
 * in the map.
 * The default value is the last parameter, and must be an lvalue reference.
 */
template <
    class Map,
    class Key1,
    class Key2,
    class... KeysDefault,
    typename = typename std::enable_if<sizeof...(KeysDefault) != 0>::type,
    typename = typename std::enable_if<std::is_lvalue_reference<
        typename detail::DefaultType<KeysDefault...>::type>::value>::type>
auto get_ref_default(
    const Map& map,
    const Key1& key1,
    const Key2& key2,
    KeysDefault&&... keysDefault) ->
    typename detail::NestedMapType<Map, 1 + sizeof...(KeysDefault)>::type
        const& {
  if (const auto* ptr = get_ptr(map, key1)) {
    return get_ref_default(*ptr, key2, keysDefault...);
  }
  return detail::extract_default(keysDefault...);
}
} // namespace folly
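
The nested overloads added above recurse one map level per key, so a path of keys walks a map of maps. A short usage sketch, under the same assumption that this file is folly/MapUtil.h (the map layout is illustrative):

#include <map>
#include <string>

#include <folly/MapUtil.h>

void nested_lookup_example() {
  std::map<std::string, std::map<std::string, int>> m{
      {"outer", {{"inner", 7}}}};
  auto opt = folly::get_optional(m, "outer", "inner"); // contains 7
  const int* p = folly::get_ptr(m, "outer", "missing"); // nullptr
  int v = folly::get_default(m, "outer", "inner", -1); // 7; default is copied
  (void)opt;
  (void)p;
  (void)v;
}
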
ios/Pods/Folly/folly/Math.h (generated; 32 changes)
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2016-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -101,12 +101,11 @@ inline constexpr T divRoundAwayBranchful(T num, T denom) {
template <typename N, typename D>
using IdivResultType = typename std::enable_if<
    std::is_integral<N>::value && std::is_integral<D>::value &&
        !std::is_same<N, bool>::value &&
        !std::is_same<D, bool>::value,
        !std::is_same<N, bool>::value && !std::is_same<D, bool>::value,
    decltype(N{1} / D{1})>::type;
}
} // namespace detail

#if defined(__arm__) && !FOLLY_A64
#if defined(__arm__) && !FOLLY_AARCH64
constexpr auto kIntegerDivisionGivesRemainder = false;
#else
constexpr auto kIntegerDivisionGivesRemainder = true;
@ -132,9 +131,10 @@ constexpr auto kIntegerDivisionGivesRemainder = true;
template <typename N, typename D>
inline constexpr detail::IdivResultType<N, D> divFloor(N num, D denom) {
  using R = decltype(num / denom);
  return kIntegerDivisionGivesRemainder && std::is_signed<R>::value
      ? detail::divFloorBranchless<R>(num, denom)
      : detail::divFloorBranchful<R>(num, denom);
  return detail::IdivResultType<N, D>(
      kIntegerDivisionGivesRemainder && std::is_signed<R>::value
          ? detail::divFloorBranchless<R>(num, denom)
          : detail::divFloorBranchful<R>(num, denom));
}

/**
@ -151,9 +151,10 @@ inline constexpr detail::IdivResultType<N, D> divFloor(N num, D denom) {
template <typename N, typename D>
inline constexpr detail::IdivResultType<N, D> divCeil(N num, D denom) {
  using R = decltype(num / denom);
  return kIntegerDivisionGivesRemainder && std::is_signed<R>::value
      ? detail::divCeilBranchless<R>(num, denom)
      : detail::divCeilBranchful<R>(num, denom);
  return detail::IdivResultType<N, D>(
      kIntegerDivisionGivesRemainder && std::is_signed<R>::value
          ? detail::divCeilBranchless<R>(num, denom)
          : detail::divCeilBranchful<R>(num, denom));
}

/**
@ -175,7 +176,7 @@ inline constexpr detail::IdivResultType<N, D> divCeil(N num, D denom) {
 */
template <typename N, typename D>
inline constexpr detail::IdivResultType<N, D> divTrunc(N num, D denom) {
  return num / denom;
  return detail::IdivResultType<N, D>(num / denom);
}

/**
@ -193,9 +194,10 @@ inline constexpr detail::IdivResultType<N, D> divTrunc(N num, D denom) {
template <typename N, typename D>
inline constexpr detail::IdivResultType<N, D> divRoundAway(N num, D denom) {
  using R = decltype(num / denom);
  return kIntegerDivisionGivesRemainder && std::is_signed<R>::value
      ? detail::divRoundAwayBranchless<R>(num, denom)
      : detail::divRoundAwayBranchful<R>(num, denom);
  return detail::IdivResultType<N, D>(
      kIntegerDivisionGivesRemainder && std::is_signed<R>::value
          ? detail::divRoundAwayBranchless<R>(num, denom)
          : detail::divRoundAwayBranchful<R>(num, denom));
}

} // namespace folly
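
The IdivResultType casts added above only silence conversion warnings; the rounding behavior is untouched. As a quick sanity check of how the four flavors differ on a negative dividend (these follow from the documented semantics, not from this diff):

#include <folly/Math.h>

static_assert(folly::divFloor(-3, 2) == -2, "rounds toward -infinity");
static_assert(folly::divTrunc(-3, 2) == -1, "rounds toward zero");
static_assert(folly::divCeil(-3, 2) == -1, "rounds toward +infinity");
static_assert(folly::divRoundAway(-3, 2) == -2, "rounds away from zero");
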
ios/Pods/Folly/folly/Memory.h (generated; 857 changes)
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2013-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -16,18 +16,214 @@

#pragma once

#include <folly/Traits.h>

#include <cassert>
#include <cerrno>
#include <cstddef>
#include <cstdlib>
#include <exception>
#include <limits>
#include <memory>
#include <stdexcept>
#include <type_traits>
#include <utility>

#include <folly/ConstexprMath.h>
#include <folly/Likely.h>
#include <folly/Traits.h>
#include <folly/functional/Invoke.h>
#include <folly/lang/Align.h>
#include <folly/lang/Exception.h>
#include <folly/portability/Config.h>
#include <folly/portability/Malloc.h>

namespace folly {

#if _POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600 || \
    (defined(__ANDROID__) && (__ANDROID_API__ > 16)) || \
    (defined(__APPLE__) && \
     (__MAC_OS_X_VERSION_MIN_REQUIRED >= __MAC_10_6 || \
      __IPHONE_OS_VERSION_MIN_REQUIRED >= __IPHONE_3_0))

inline void* aligned_malloc(size_t size, size_t align) {
  // use posix_memalign, but mimic the behaviour of memalign
  void* ptr = nullptr;
  int rc = posix_memalign(&ptr, align, size);
  return rc == 0 ? (errno = 0, ptr) : (errno = rc, nullptr);
}

inline void aligned_free(void* aligned_ptr) {
  free(aligned_ptr);
}

#elif defined(_WIN32)

inline void* aligned_malloc(size_t size, size_t align) {
  return _aligned_malloc(size, align);
}

inline void aligned_free(void* aligned_ptr) {
  _aligned_free(aligned_ptr);
}

#else

inline void* aligned_malloc(size_t size, size_t align) {
  return memalign(align, size);
}

inline void aligned_free(void* aligned_ptr) {
  free(aligned_ptr);
}

#endif
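
All three platform branches above expose the same contract: align must be a power of two (the posix_memalign branch additionally requires a multiple of sizeof(void*)), failure yields nullptr, and the result must be released with aligned_free rather than plain free. A minimal sketch; the 64-byte figure is an assumed cache-line size:

#include <cstddef>

#include <folly/Memory.h>

void* make_cache_line_block(std::size_t bytes) {
  void* p = folly::aligned_malloc(bytes, 64); // nullptr on failure
  return p; // the caller must eventually call folly::aligned_free(p)
}
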

namespace detail {
template <typename Alloc, size_t kAlign, bool kAllocate>
void rawOverAlignedImpl(Alloc const& alloc, size_t n, void*& raw) {
  static_assert((kAlign & (kAlign - 1)) == 0, "Align must be a power of 2");

  using AllocTraits = std::allocator_traits<Alloc>;
  using T = typename AllocTraits::value_type;

  constexpr bool kCanBypass = std::is_same<Alloc, std::allocator<T>>::value;

  // BaseType is a type that gives us as much alignment as we need if
  // we can get it naturally, otherwise it is aligned as max_align_t.
  // kBaseAlign is both the alignment and size of this type.
  constexpr size_t kBaseAlign = constexpr_min(kAlign, alignof(max_align_t));
  using BaseType = std::aligned_storage_t<kBaseAlign, kBaseAlign>;
  using BaseAllocTraits =
      typename AllocTraits::template rebind_traits<BaseType>;
  using BaseAlloc = typename BaseAllocTraits::allocator_type;
  static_assert(
      sizeof(BaseType) == kBaseAlign && alignof(BaseType) == kBaseAlign, "");

#if __cpp_sized_deallocation
  if (kCanBypass && kAlign == kBaseAlign) {
    // until std::allocator uses sized deallocation, it is worth the
    // effort to bypass it when we are able
    if (kAllocate) {
      raw = ::operator new(n * sizeof(T));
    } else {
      ::operator delete(raw, n * sizeof(T));
    }
    return;
  }
#endif

  if (kCanBypass && kAlign > kBaseAlign) {
    // allocating as BaseType isn't sufficient to get alignment, but
    // since we can bypass Alloc we can use something like posix_memalign
    if (kAllocate) {
      raw = aligned_malloc(n * sizeof(T), kAlign);
    } else {
      aligned_free(raw);
    }
    return;
  }

  // we're not allowed to bypass Alloc, or we don't want to
  BaseAlloc a(alloc);

  // allocation size is counted in sizeof(BaseType)
  size_t quanta = (n * sizeof(T) + kBaseAlign - 1) / sizeof(BaseType);
  if (kAlign <= kBaseAlign) {
    // rebinding Alloc to BaseType is sufficient to get us the alignment
    // we want, happy path
    if (kAllocate) {
      raw = static_cast<void*>(
          std::addressof(*BaseAllocTraits::allocate(a, quanta)));
    } else {
      BaseAllocTraits::deallocate(
          a,
          std::pointer_traits<typename BaseAllocTraits::pointer>::pointer_to(
              *static_cast<BaseType*>(raw)),
          quanta);
    }
    return;
  }

  // Overaligned and custom allocator, our only option is to
  // overallocate and store a delta to the actual allocation just
  // before the returned ptr.
  //
  // If we give ourselves kAlign extra bytes, then since
  // sizeof(BaseType) divides kAlign we can meet alignment while
  // getting a prefix of one BaseType. If we happen to get a
  // kAlign-aligned block, then we can return a pointer to underlying
  // + kAlign, otherwise there will be at least kBaseAlign bytes in
  // the unused prefix of the first kAlign-aligned block.
  if (kAllocate) {
    char* base = reinterpret_cast<char*>(std::addressof(
        *BaseAllocTraits::allocate(a, quanta + kAlign / sizeof(BaseType))));
    size_t byteDelta =
        kAlign - (reinterpret_cast<uintptr_t>(base) & (kAlign - 1));
    raw = static_cast<void*>(base + byteDelta);
    static_cast<size_t*>(raw)[-1] = byteDelta;
  } else {
    size_t byteDelta = static_cast<size_t*>(raw)[-1];
    char* base = static_cast<char*>(raw) - byteDelta;
    BaseAllocTraits::deallocate(
        a,
        std::pointer_traits<typename BaseAllocTraits::pointer>::pointer_to(
            *reinterpret_cast<BaseType*>(base)),
        quanta + kAlign / sizeof(BaseType));
  }
}
} // namespace detail

// Works like std::allocator_traits<Alloc>::allocate, but handles
// over-aligned types. Feel free to manually specify any power of two as
// the Align template arg. Must be matched with deallocateOverAligned.
// allocationBytesForOverAligned will give you the number of bytes that
// this function actually requests.
template <
    typename Alloc,
    size_t kAlign = alignof(typename std::allocator_traits<Alloc>::value_type)>
typename std::allocator_traits<Alloc>::pointer allocateOverAligned(
    Alloc const& alloc,
    size_t n) {
  void* raw = nullptr;
  detail::rawOverAlignedImpl<Alloc, kAlign, true>(alloc, n, raw);
  return std::pointer_traits<typename std::allocator_traits<Alloc>::pointer>::
      pointer_to(
          *static_cast<typename std::allocator_traits<Alloc>::value_type*>(
              raw));
}

template <
    typename Alloc,
    size_t kAlign = alignof(typename std::allocator_traits<Alloc>::value_type)>
void deallocateOverAligned(
    Alloc const& alloc,
    typename std::allocator_traits<Alloc>::pointer ptr,
    size_t n) {
  void* raw = static_cast<void*>(std::addressof(*ptr));
  detail::rawOverAlignedImpl<Alloc, kAlign, false>(alloc, n, raw);
}

template <
    typename Alloc,
    size_t kAlign = alignof(typename std::allocator_traits<Alloc>::value_type)>
size_t allocationBytesForOverAligned(size_t n) {
  static_assert((kAlign & (kAlign - 1)) == 0, "Align must be a power of 2");

  using AllocTraits = std::allocator_traits<Alloc>;
  using T = typename AllocTraits::value_type;

  constexpr size_t kBaseAlign = constexpr_min(kAlign, alignof(max_align_t));

  if (kAlign > kBaseAlign && std::is_same<Alloc, std::allocator<T>>::value) {
    return n * sizeof(T);
  } else {
    size_t quanta = (n * sizeof(T) + kBaseAlign - 1) / kBaseAlign;
    if (kAlign > kBaseAlign) {
      quanta += kAlign / kBaseAlign;
    }
    return quanta * kBaseAlign;
  }
}
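
Putting the three helpers together: allocate and release over-aligned storage through a plain std::allocator. The 64-byte alignment and element count are illustrative:

#include <memory>

#include <folly/Memory.h>

void over_aligned_example() {
  std::allocator<double> alloc;
  // Request 64-byte alignment even though alignof(double) is smaller.
  double* p = folly::allocateOverAligned<std::allocator<double>, 64>(alloc, 8);
  // ... use p[0] .. p[7] ...
  folly::deallocateOverAligned<std::allocator<double>, 64>(alloc, p, 8);
}
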

/**
 * For exception safety and consistency with make_shared. Erase me when
 * we have std::make_unique().
@ -36,31 +232,29 @@ namespace folly {
 * @author Xu Ning (xning@fb.com)
 */

#if __cplusplus >= 201402L || \
    (defined __cpp_lib_make_unique && __cpp_lib_make_unique >= 201304L) || \
    (defined(_MSC_VER) && _MSC_VER >= 1900)
#if __cplusplus >= 201402L || __cpp_lib_make_unique >= 201304L || \
    (__ANDROID__ && __cplusplus >= 201300L) || _MSC_VER >= 1900

/* using override */ using std::make_unique;

#else

template<typename T, typename... Args>
template <typename T, typename... Args>
typename std::enable_if<!std::is_array<T>::value, std::unique_ptr<T>>::type
make_unique(Args&&... args) {
  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}

// Allows 'make_unique<T[]>(10)'. (N3690 s20.9.1.4 p3-4)
template<typename T>
template <typename T>
typename std::enable_if<std::is_array<T>::value, std::unique_ptr<T>>::type
make_unique(const size_t n) {
  return std::unique_ptr<T>(new typename std::remove_extent<T>::type[n]());
}

// Disallows 'make_unique<T[10]>()'. (N3690 s20.9.1.4 p5)
template<typename T, typename... Args>
typename std::enable_if<
  std::extent<T>::value != 0, std::unique_ptr<T>>::type
template <typename T, typename... Args>
typename std::enable_if<std::extent<T>::value != 0, std::unique_ptr<T>>::type
make_unique(Args&&...) = delete;

#endif
@ -85,7 +279,7 @@ make_unique(Args&&...) = delete;
 *   buf = nullptr; // calls BIO_free(buf.get())
 */

template <typename T, void(*f)(T*)>
template <typename T, void (*f)(T*)>
struct static_function_deleter {
  void operator()(T* t) const {
    f(t);
@ -108,320 +302,425 @@ struct static_function_deleter {
 *
 * Useful when `T` is long, such as:
 *
 *   using T = foobar::cpp2::FooBarServiceAsyncClient;
 *   using T = foobar::FooBarAsyncClient;
 */
template <typename T, typename D>
std::shared_ptr<T> to_shared_ptr(std::unique_ptr<T, D>&& ptr) {
  return std::shared_ptr<T>(std::move(ptr));
}

using SysBufferDeleter = static_function_deleter<void, ::free>;
using SysBufferUniquePtr = std::unique_ptr<void, SysBufferDeleter>;
inline SysBufferUniquePtr allocate_sys_buffer(size_t size) {
  return SysBufferUniquePtr(::malloc(size));
/**
 * to_weak_ptr
 *
 * Make a weak_ptr and return it from a shared_ptr without specifying the
 * template type parameter and letting the compiler deduce it.
 *
 * So you can write this:
 *
 *   auto wptr = to_weak_ptr(getSomethingShared<T>());
 *
 * Instead of this:
 *
 *   auto wptr = weak_ptr<T>(getSomethingShared<T>());
 *
 * Useful when `T` is long, such as:
 *
 *   using T = foobar::FooBarAsyncClient;
 */
template <typename T>
std::weak_ptr<T> to_weak_ptr(const std::shared_ptr<T>& ptr) {
  return std::weak_ptr<T>(ptr);
}
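
Both helpers exist only so the compiler deduces the (often long) template argument; a short sketch:

#include <memory>

#include <folly/Memory.h>

void deduction_example(std::unique_ptr<int> up) {
  auto sp = folly::to_shared_ptr(std::move(up)); // std::shared_ptr<int>
  auto wp = folly::to_weak_ptr(sp);              // std::weak_ptr<int>
  (void)wp;
}
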

/**
 * A SimpleAllocator must provide two methods:
 *
 *   void* allocate(size_t size);
 *   void deallocate(void* ptr);
 *
 * which, respectively, allocate a block of size bytes (aligned to the
 * maximum alignment required on your system), throwing std::bad_alloc
 * if the allocation can't be satisfied, and free a previously
 * allocated block.
 *
 * SysAlloc resembles the standard allocator.
 */
class SysAlloc {
 public:
  void* allocate(size_t size) {
    void* p = ::malloc(size);
    if (!p) throw std::bad_alloc();
    return p;
  }
  void deallocate(void* p) {
    ::free(p);
  }
namespace detail {
template <typename T>
struct lift_void_to_char {
  using type = T;
};
template <>
struct lift_void_to_char<void> {
  using type = char;
};
} // namespace detail

/**
 * StlAllocator wraps a SimpleAllocator into an STL-compliant
 * allocator, maintaining an instance pointer to the simple allocator
 * object. The underlying SimpleAllocator object must outlive all
 * instances of StlAllocator using it.
 * SysAllocator
 *
 * But note that if you pass StlAllocator<MallocAllocator,...> to a
 * standard container it will be larger due to the contained state
 * pointer.
 *
 * @author: Tudor Bosman <tudorb@fb.com>
 * Resembles std::allocator, the default Allocator, but wraps std::malloc and
 * std::free.
 */

// This would be so much simpler with std::allocator_traits, but gcc 4.6.2
// doesn't support it.
template <class Alloc, class T> class StlAllocator;

template <class Alloc> class StlAllocator<Alloc, void> {
 public:
  typedef void value_type;
  typedef void* pointer;
  typedef const void* const_pointer;

  StlAllocator() : alloc_(nullptr) { }
  explicit StlAllocator(Alloc* a) : alloc_(a) { }

  Alloc* alloc() const {
    return alloc_;
  }

  template <class U> struct rebind {
    typedef StlAllocator<Alloc, U> other;
  };

  bool operator!=(const StlAllocator<Alloc, void>& other) const {
    return alloc_ != other.alloc_;
  }

  bool operator==(const StlAllocator<Alloc, void>& other) const {
    return alloc_ == other.alloc_;
  }

template <typename T>
class SysAllocator {
 private:
  Alloc* alloc_;
  using Self = SysAllocator<T>;

 public:
  using value_type = T;

  T* allocate(size_t count) {
    using lifted = typename detail::lift_void_to_char<T>::type;
    auto const p = std::malloc(sizeof(lifted) * count);
    if (!p) {
      throw_exception<std::bad_alloc>();
    }
    return static_cast<T*>(p);
  }
  void deallocate(T* p, size_t /* count */) {
    std::free(p);
  }

  friend bool operator==(Self const&, Self const&) noexcept {
    return true;
  }
  friend bool operator!=(Self const&, Self const&) noexcept {
    return false;
  }
};
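
Unlike the StlAllocator wrapper it sits beside, SysAllocator is stateless and models the C++ Allocator concept directly, so it drops straight into standard containers; a minimal sketch:

#include <vector>

#include <folly/Memory.h>

void sys_allocator_example() {
  // Element storage comes from std::malloc/std::free instead of operator new.
  std::vector<int, folly::SysAllocator<int>> v;
  v.assign({1, 2, 3});
}
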

template <class Alloc, class T>
class StlAllocator {
 public:
  typedef T value_type;
  typedef T* pointer;
  typedef const T* const_pointer;
  typedef T& reference;
  typedef const T& const_reference;

  typedef ptrdiff_t difference_type;
  typedef size_t size_type;

  StlAllocator() : alloc_(nullptr) { }
  explicit StlAllocator(Alloc* a) : alloc_(a) { }

  template <class U> StlAllocator(const StlAllocator<Alloc, U>& other)
    : alloc_(other.alloc()) { }

  T* allocate(size_t n, const void* /* hint */ = nullptr) {
    return static_cast<T*>(alloc_->allocate(n * sizeof(T)));
  }

  void deallocate(T* p, size_t /* n */) { alloc_->deallocate(p); }

  size_t max_size() const {
    return std::numeric_limits<size_t>::max();
  }

  T* address(T& x) const {
    return std::addressof(x);
  }

  const T* address(const T& x) const {
    return std::addressof(x);
  }

  template <class... Args>
  void construct(T* p, Args&&... args) {
    new (p) T(std::forward<Args>(args)...);
  }

  void destroy(T* p) {
    p->~T();
  }

  Alloc* alloc() const {
    return alloc_;
  }

  template <class U> struct rebind {
    typedef StlAllocator<Alloc, U> other;
  };

  bool operator!=(const StlAllocator<Alloc, T>& other) const {
    return alloc_ != other.alloc_;
  }

  bool operator==(const StlAllocator<Alloc, T>& other) const {
    return alloc_ == other.alloc_;
  }

class DefaultAlign {
 private:
  Alloc* alloc_;
  using Self = DefaultAlign;
  std::size_t align_;

 public:
  explicit DefaultAlign(std::size_t align) noexcept : align_(align) {
    assert(!(align_ < sizeof(void*)) && bool("bad align: too small"));
    assert(!(align_ & (align_ - 1)) && bool("bad align: not power-of-two"));
  }
  std::size_t operator()() const noexcept {
    return align_;
  }

  friend bool operator==(Self const& a, Self const& b) noexcept {
    return a.align_ == b.align_;
  }
  friend bool operator!=(Self const& a, Self const& b) noexcept {
    return a.align_ != b.align_;
  }
};

template <std::size_t Align>
class FixedAlign {
 private:
  static_assert(!(Align < sizeof(void*)), "bad align: too small");
  static_assert(!(Align & (Align - 1)), "bad align: not power-of-two");
  using Self = FixedAlign<Align>;

 public:
  constexpr std::size_t operator()() const noexcept {
    return Align;
  }

  friend bool operator==(Self const&, Self const&) noexcept {
    return true;
  }
  friend bool operator!=(Self const&, Self const&) noexcept {
    return false;
  }
};

/**
 * Helper function to obtain rebound allocators
 * AlignedSysAllocator
 *
 * @author: Marcelo Juchem <marcelo@fb.com>
 * Resembles std::allocator, the default Allocator, but wraps aligned_malloc and
 * aligned_free.
 *
 * Accepts a policy parameter for providing the alignment, which must:
 *   * be invocable as std::size_t() noexcept, returning the alignment
 *   * be noexcept-copy-constructible
 *   * have noexcept operator==
 *   * have noexcept operator!=
 *   * not be final
 *
 * DefaultAlign and FixedAlign<std::size_t>, provided above, are valid policies.
 */
template <typename T, typename Allocator>
typename Allocator::template rebind<T>::other rebind_allocator(
  Allocator const& allocator
) {
  return typename Allocator::template rebind<T>::other(allocator);
}
template <typename T, typename Align = DefaultAlign>
class AlignedSysAllocator : private Align {
 private:
  using Self = AlignedSysAllocator<T, Align>;

  template <typename, typename>
  friend class AlignedSysAllocator;

  constexpr Align const& align() const {
    return *this;
  }

 public:
  static_assert(std::is_nothrow_copy_constructible<Align>::value, "");
  static_assert(is_nothrow_invocable_r<std::size_t, Align>::value, "");

  using value_type = T;

  using propagate_on_container_copy_assignment = std::true_type;
  using propagate_on_container_move_assignment = std::true_type;
  using propagate_on_container_swap = std::true_type;

  using Align::Align;

  // TODO: remove this ctor, which is required only by gcc49
  template <
      typename S = Align,
      _t<std::enable_if<std::is_default_constructible<S>::value, int>> = 0>
  constexpr AlignedSysAllocator() noexcept(noexcept(Align())) : Align() {}

  template <typename U>
  constexpr explicit AlignedSysAllocator(
      AlignedSysAllocator<U, Align> const& other) noexcept
      : Align(other.align()) {}

  T* allocate(size_t count) {
    using lifted = typename detail::lift_void_to_char<T>::type;
    auto const p = aligned_malloc(sizeof(lifted) * count, align()());
    if (!p) {
      if (FOLLY_UNLIKELY(errno != ENOMEM)) {
        std::terminate();
      }
      throw_exception<std::bad_alloc>();
    }
    return static_cast<T*>(p);
  }
  void deallocate(T* p, size_t /* count */) {
    aligned_free(p);
  }

  friend bool operator==(Self const& a, Self const& b) noexcept {
    return a.align() == b.align();
  }
  friend bool operator!=(Self const& a, Self const& b) noexcept {
    return a.align() != b.align();
  }
};
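
With a FixedAlign policy the alignment is a compile-time constant and, through the private Align base, costs no storage; with DefaultAlign it is a constructor argument checked at runtime. A sketch of both, assuming 64-byte alignment:

#include <vector>

#include <folly/Memory.h>

void aligned_sys_allocator_example() {
  // Compile-time policy: the allocator itself stays empty.
  std::vector<float, folly::AlignedSysAllocator<float, folly::FixedAlign<64>>>
      a(16);

  // Runtime policy: the alignment goes through the inherited constructor.
  folly::AlignedSysAllocator<float> alloc(64);
  std::vector<float, folly::AlignedSysAllocator<float>> b(16, 0.0f, alloc);
}
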

/**
 * CxxAllocatorAdaptor
 *
 * A type conforming to C++ concept Allocator, delegating operations to an
 * unowned Inner which has this required interface:
 *
 *   void* allocate(std::size_t)
 *   void deallocate(void*, std::size_t)
 *
 * Note that Inner is *not* a C++ Allocator.
 */
template <typename T, class Inner>
class CxxAllocatorAdaptor {
 private:
  using Self = CxxAllocatorAdaptor<T, Inner>;

  template <typename U, typename UAlloc>
  friend class CxxAllocatorAdaptor;

  std::reference_wrapper<Inner> ref_;

 public:
  using value_type = T;

  using propagate_on_container_copy_assignment = std::true_type;
  using propagate_on_container_move_assignment = std::true_type;
  using propagate_on_container_swap = std::true_type;

  explicit CxxAllocatorAdaptor(Inner& ref) : ref_(ref) {}

  template <typename U>
  explicit CxxAllocatorAdaptor(CxxAllocatorAdaptor<U, Inner> const& other)
      : ref_(other.ref_) {}

  T* allocate(std::size_t n) {
    using lifted = typename detail::lift_void_to_char<T>::type;
    return static_cast<T*>(ref_.get().allocate(sizeof(lifted) * n));
  }
  void deallocate(T* p, std::size_t n) {
    using lifted = typename detail::lift_void_to_char<T>::type;
    ref_.get().deallocate(p, sizeof(lifted) * n);
  }

  friend bool operator==(Self const& a, Self const& b) noexcept {
    return std::addressof(a.ref_.get()) == std::addressof(b.ref_.get());
  }
  friend bool operator!=(Self const& a, Self const& b) noexcept {
    return std::addressof(a.ref_.get()) != std::addressof(b.ref_.get());
  }
};
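
A sketch of adapting a non-Allocator Inner so it can back a standard container; MallocArena here is hypothetical, and because the adaptor stores only a reference, the Inner must outlive every container that uses it:

#include <cstdlib>
#include <vector>

#include <folly/Memory.h>

// Hypothetical Inner: matches the required interface, backed by malloc.
struct MallocArena {
  void* allocate(std::size_t n) { return std::malloc(n); }
  void deallocate(void* p, std::size_t /* n */) { std::free(p); }
};

void adaptor_example() {
  MallocArena arena; // must outlive v
  folly::CxxAllocatorAdaptor<int, MallocArena> alloc(arena);
  std::vector<int, folly::CxxAllocatorAdaptor<int, MallocArena>> v(alloc);
  v.push_back(42);
}
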

/*
 * Helper classes/functions for creating a unique_ptr using a custom
 * allocator.
 * allocator_delete
 *
 * @author: Marcelo Juchem <marcelo@fb.com>
 * A deleter which automatically works with a given allocator.
 *
 * Derives from the allocator to take advantage of the empty base
 * optimization when possible.
 */
template <typename Alloc>
class allocator_delete : private std::remove_reference<Alloc>::type {
 private:
  using allocator_type = typename std::remove_reference<Alloc>::type;
  using allocator_traits = std::allocator_traits<allocator_type>;
  using value_type = typename allocator_traits::value_type;
  using pointer = typename allocator_traits::pointer;

// Derives from the allocator to take advantage of the empty base
// optimization when possible.
template <typename Allocator>
class allocator_delete
  : private std::remove_reference<Allocator>::type
{
  typedef typename std::remove_reference<Allocator>::type allocator_type;

 public:
  typedef typename Allocator::pointer pointer;

 public:
  allocator_delete() = default;
  allocator_delete(allocator_delete const&) = default;
  allocator_delete(allocator_delete&&) = default;
  allocator_delete& operator=(allocator_delete const&) = default;
  allocator_delete& operator=(allocator_delete&&) = default;

  explicit allocator_delete(const allocator_type& allocator)
    : allocator_type(allocator)
  {}
  explicit allocator_delete(const allocator_type& alloc)
      : allocator_type(alloc) {}

  explicit allocator_delete(allocator_type&& allocator)
    : allocator_type(std::move(allocator))
  {}
  explicit allocator_delete(allocator_type&& alloc)
      : allocator_type(std::move(alloc)) {}

  template <typename U>
  allocator_delete(const allocator_delete<U>& other)
    : allocator_type(other.get_allocator())
  {}
      : allocator_type(other.get_allocator()) {}

  allocator_type& get_allocator() const {
    return *const_cast<allocator_delete*>(this);
  allocator_type const& get_allocator() const {
    return *this;
  }

  void operator()(pointer p) const {
    if (!p) return;
    const_cast<allocator_delete*>(this)->destroy(p);
    const_cast<allocator_delete*>(this)->deallocate(p, 1);
    auto alloc = get_allocator();
    allocator_traits::destroy(alloc, p);
    allocator_traits::deallocate(alloc, p, 1);
  }
};

template <typename T, typename Allocator>
class is_simple_allocator {
  FOLLY_CREATE_HAS_MEMBER_FN_TRAITS(has_destroy, destroy);

  typedef typename std::remove_const<
    typename std::remove_reference<Allocator>::type
  >::type allocator;
  typedef typename std::remove_reference<T>::type value_type;
  typedef value_type* pointer;

 public:
  constexpr static bool value = !has_destroy<allocator, void(pointer)>::value
    && !has_destroy<allocator, void(void*)>::value;
};

template <typename T, typename Allocator>
struct as_stl_allocator {
  typedef typename std::conditional<
    is_simple_allocator<T, Allocator>::value,
    folly::StlAllocator<
      typename std::remove_reference<Allocator>::type,
      typename std::remove_reference<T>::type
    >,
    typename std::remove_reference<Allocator>::type
  >::type type;
};

template <typename T, typename Allocator>
typename std::enable_if<
  is_simple_allocator<T, Allocator>::value,
  folly::StlAllocator<
    typename std::remove_reference<Allocator>::type,
    typename std::remove_reference<T>::type
  >
>::type make_stl_allocator(Allocator&& allocator) {
  return folly::StlAllocator<
    typename std::remove_reference<Allocator>::type,
    typename std::remove_reference<T>::type
  >(&allocator);
}

template <typename T, typename Allocator>
typename std::enable_if<
  !is_simple_allocator<T, Allocator>::value,
  typename std::remove_reference<Allocator>::type
>::type make_stl_allocator(Allocator&& allocator) {
  return std::move(allocator);
}

/**
 * AllocatorUniquePtr: a unique_ptr that supports both STL-style
 * allocators and SimpleAllocator
 *
 * @author: Marcelo Juchem <marcelo@fb.com>
 * allocate_unique, like std::allocate_shared but for std::unique_ptr
 */

template <typename T, typename Allocator>
struct AllocatorUniquePtr {
  typedef std::unique_ptr<T,
    folly::allocator_delete<
      typename std::conditional<
        is_simple_allocator<T, Allocator>::value,
        folly::StlAllocator<typename std::remove_reference<Allocator>::type, T>,
        typename std::remove_reference<Allocator>::type
      >::type
    >
  > type;
};

/**
 * Functions to allocate a unique_ptr / shared_ptr, supporting both
 * STL-style allocators and SimpleAllocator, analogous to std::allocate_shared
 *
 * @author: Marcelo Juchem <marcelo@fb.com>
 */

template <typename T, typename Allocator, typename ...Args>
typename AllocatorUniquePtr<T, Allocator>::type allocate_unique(
  Allocator&& allocator, Args&&... args
) {
  auto stlAllocator = folly::make_stl_allocator<T>(
    std::forward<Allocator>(allocator)
  );
  auto p = stlAllocator.allocate(1);

  try {
    stlAllocator.construct(p, std::forward<Args>(args)...);

    return {p,
      folly::allocator_delete<decltype(stlAllocator)>(std::move(stlAllocator))
    };
  } catch (...) {
    stlAllocator.deallocate(p, 1);
    throw;
template <typename T, typename Alloc, typename... Args>
std::unique_ptr<T, allocator_delete<Alloc>> allocate_unique(
    Alloc const& alloc,
    Args&&... args) {
  using traits = std::allocator_traits<Alloc>;
  struct DeferCondDeallocate {
    bool& cond;
    Alloc& copy;
    T* p;
    ~DeferCondDeallocate() {
      if (FOLLY_UNLIKELY(!cond)) {
        traits::deallocate(copy, p, 1);
      }
    }
  };
  auto copy = alloc;
  auto const p = traits::allocate(copy, 1);
  {
    bool constructed = false;
    DeferCondDeallocate handler{constructed, copy, p};
    traits::construct(copy, p, static_cast<Args&&>(args)...);
    constructed = true;
  }
  return {p, allocator_delete<Alloc>(std::move(copy))};
}
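
The rewritten allocate_unique keeps the old try/catch exception guarantee, but via the DeferCondDeallocate guard: if construct() throws, the guard's destructor returns the raw allocation before the exception propagates. A usage sketch with a standard allocator:

#include <memory>
#include <string>

#include <folly/Memory.h>

void allocate_unique_example() {
  std::allocator<std::string> alloc;
  // Deduces std::unique_ptr<std::string,
  //     folly::allocator_delete<std::allocator<std::string>>>.
  auto p = folly::allocate_unique<std::string>(alloc, "hello");
  p->append(", world");
}
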

template <typename T, typename Allocator, typename ...Args>
std::shared_ptr<T> allocate_shared(Allocator&& allocator, Args&&... args) {
  return std::allocate_shared<T>(
    folly::make_stl_allocator<T>(std::forward<Allocator>(allocator)),
    std::forward<Args>(args)...
  );
struct SysBufferDeleter {
  void operator()(void* ptr) {
    std::free(ptr);
  }
};
using SysBufferUniquePtr = std::unique_ptr<void, SysBufferDeleter>;

inline SysBufferUniquePtr allocate_sys_buffer(std::size_t size) {
  auto p = std::malloc(size);
  if (!p) {
    throw_exception<std::bad_alloc>();
  }
  return {p, {}};
}

/**
 * IsArenaAllocator<T>::value describes whether SimpleAllocator has
 * no-op deallocate().
 * AllocatorHasTrivialDeallocate
 *
 * Unambiguously inherits std::integral_constant<bool, V> for some bool V.
 *
 * Describes whether a C++ Allocator has trivial, i.e. no-op, deallocate().
 *
 * Also may be used to describe types which may be used with
 * CxxAllocatorAdaptor.
 */
template <class T> struct IsArenaAllocator : std::false_type { };
template <typename Alloc>
struct AllocatorHasTrivialDeallocate : std::false_type {};

} // namespace folly
template <typename T, class Alloc>
struct AllocatorHasTrivialDeallocate<CxxAllocatorAdaptor<T, Alloc>>
    : AllocatorHasTrivialDeallocate<Alloc> {};

namespace detail {
// note that construct and destroy here are methods, not short names for
// the constructor and destructor
FOLLY_CREATE_MEMBER_INVOKE_TRAITS(AllocatorConstruct_, construct);
FOLLY_CREATE_MEMBER_INVOKE_TRAITS(AllocatorDestroy_, destroy);

template <typename Void, typename Alloc, typename... Args>
struct AllocatorCustomizesConstruct_
    : AllocatorConstruct_::template is_invocable<Alloc, Args...> {};

template <typename Alloc, typename... Args>
struct AllocatorCustomizesConstruct_<
    void_t<typename Alloc::folly_has_default_object_construct>,
    Alloc,
    Args...> : Negation<typename Alloc::folly_has_default_object_construct> {};

template <typename Void, typename Alloc, typename... Args>
struct AllocatorCustomizesDestroy_
    : AllocatorDestroy_::template is_invocable<Alloc, Args...> {};

template <typename Alloc, typename... Args>
struct AllocatorCustomizesDestroy_<
    void_t<typename Alloc::folly_has_default_object_destroy>,
    Alloc,
    Args...> : Negation<typename Alloc::folly_has_default_object_destroy> {};
} // namespace detail

/**
 * AllocatorHasDefaultObjectConstruct
 *
 * AllocatorHasDefaultObjectConstruct<A, T, Args...> unambiguously
 * inherits std::integral_constant<bool, V>, where V will be true iff
 * the effect of std::allocator_traits<A>::construct(a, p, args...) is
 * the same as new (static_cast<void*>(p)) T(args...). If true then
 * any optimizations applicable to object construction (relying on
 * std::is_trivially_copyable<T>, for example) can be applied to objects
 * in an allocator-aware container using an allocation of type A.
 *
 * Allocator types can override V by declaring a type alias for
 * folly_has_default_object_construct. It is helpful to do this if you
 * define a custom allocator type that defines a construct method, but
 * that method doesn't do anything except call placement new.
 */
template <typename Alloc, typename T, typename... Args>
struct AllocatorHasDefaultObjectConstruct
    : Negation<
          detail::AllocatorCustomizesConstruct_<void, Alloc, T*, Args...>> {};

template <typename Value, typename T, typename... Args>
struct AllocatorHasDefaultObjectConstruct<std::allocator<Value>, T, Args...>
    : std::true_type {};

/**
 * AllocatorHasDefaultObjectDestroy
 *
 * AllocatorHasDefaultObjectDestroy<A, T> unambiguously inherits
 * std::integral_constant<bool, V>, where V will be true iff the effect
 * of std::allocator_traits<A>::destroy(a, p) is the same as p->~T().
 * If true then optimizations applicable to object destruction (relying
 * on std::is_trivially_destructible<T>, for example) can be applied to
 * objects in an allocator-aware container using an allocator of type A.
 *
 * Allocator types can override V by declaring a type alias for
 * folly_has_default_object_destroy. It is helpful to do this if you
 * define a custom allocator type that defines a destroy method, but that
 * method doesn't do anything except call the object's destructor.
 */
template <typename Alloc, typename T>
struct AllocatorHasDefaultObjectDestroy
    : Negation<detail::AllocatorCustomizesDestroy_<void, Alloc, T*>> {};

template <typename Value, typename T>
struct AllocatorHasDefaultObjectDestroy<std::allocator<Value>, T>
    : std::true_type {};

} // namespace folly
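
A sketch of the opt-in alias the two comments describe, on a hypothetical allocator whose construct() does nothing beyond placement new (only the alias name and the trait interaction come from this header; everything else is illustrative):

#include <memory>
#include <type_traits>
#include <utility>

template <typename T>
struct PlacementNewAllocator {
  using value_type = T;
  // Opt back into the default-construct optimizations despite having a
  // custom construct() member.
  using folly_has_default_object_construct = std::true_type;

  T* allocate(std::size_t n) { return std::allocator<T>().allocate(n); }
  void deallocate(T* p, std::size_t n) {
    std::allocator<T>().deallocate(p, n);
  }

  template <typename... Args>
  void construct(T* p, Args&&... args) {
    ::new (static_cast<void*>(p)) T(std::forward<Args>(args)...);
  }
};
// With the alias in place, folly::AllocatorHasDefaultObjectConstruct<
//     PlacementNewAllocator<int>, int, int> is expected to be true.
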
ios/Pods/Folly/folly/MemoryMapping.h (generated; 251 changes)
@ -1,251 +0,0 @@
/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <folly/FBString.h>
#include <folly/File.h>
#include <folly/Range.h>
#include <glog/logging.h>
#include <boost/noncopyable.hpp>

namespace folly {

/**
 * Maps files in memory (read-only).
 *
 * @author Tudor Bosman (tudorb@fb.com)
 */
class MemoryMapping : boost::noncopyable {
 public:
  /**
   * Lock the pages in memory?
   * TRY_LOCK = try to lock, log warning if permission denied
   * MUST_LOCK = lock, fail assertion if permission denied.
   */
  enum class LockMode {
    TRY_LOCK,
    MUST_LOCK
  };
  /**
   * Map a portion of the file indicated by filename in memory, causing a CHECK
   * failure on error.
   *
   * By default, map the whole file. length=-1: map from offset to EOF.
   * Unlike the mmap() system call, offset and length don't need to be
   * page-aligned. length is clipped to the end of the file if it's too large.
   *
   * The mapping will be destroyed (and the memory pointed-to by data() will
   * likely become inaccessible) when the MemoryMapping object is destroyed.
   */
  struct Options {
    Options() {}

    // Convenience methods; return *this for chaining.
    Options& setPageSize(off_t v) { pageSize = v; return *this; }
    Options& setShared(bool v) { shared = v; return *this; }
    Options& setPrefault(bool v) { prefault = v; return *this; }
    Options& setReadable(bool v) { readable = v; return *this; }
    Options& setWritable(bool v) { writable = v; return *this; }
    Options& setGrow(bool v) { grow = v; return *this; }

    // Page size. 0 = use appropriate page size.
    // (On Linux, we use a huge page size if the file is on a hugetlbfs
    // file system, and the default page size otherwise)
    off_t pageSize = 0;

    // If shared (default), the memory mapping is shared with other processes
    // mapping the same file (or children); if not shared (private), each
    // process has its own mapping. Changes in writable, private mappings are
    // not reflected to the underlying file. See the discussion of
    // MAP_PRIVATE vs MAP_SHARED in the mmap(2) manual page.
    bool shared = true;

    // Populate page tables; subsequent accesses should not be blocked
    // by page faults. This is a hint, as it may not be supported.
    bool prefault = false;

    // Map the pages readable. Note that mapping pages without read permissions
    // is not universally supported (not supported on hugetlbfs on Linux, for
    // example)
    bool readable = true;

    // Map the pages writable.
    bool writable = false;

    // When mapping a file in writable mode, grow the file to the requested
    // length (using ftruncate()) before mapping; if false, truncate the
    // mapping to the actual file size instead.
    bool grow = false;

    // Fix map at this address, if not nullptr. Must be aligned to a multiple
    // of the appropriate page size.
    void* address = nullptr;
  };

  // Options to emulate the old WritableMemoryMapping: readable and writable,
  // allow growing the file if mapping past EOF.
  static Options writable() {
    return Options().setWritable(true).setGrow(true);
  }

  enum AnonymousType {
    kAnonymous
  };

  /**
   * Create an anonymous mapping.
   */
  MemoryMapping(AnonymousType, off_t length, Options options=Options());

  explicit MemoryMapping(File file,
                         off_t offset=0,
                         off_t length=-1,
                         Options options=Options());

  explicit MemoryMapping(const char* name,
                         off_t offset=0,
                         off_t length=-1,
                         Options options=Options());

  explicit MemoryMapping(int fd,
                         off_t offset=0,
                         off_t length=-1,
                         Options options=Options());

  MemoryMapping(MemoryMapping&&) noexcept;

  ~MemoryMapping();

  MemoryMapping& operator=(MemoryMapping);

  void swap(MemoryMapping& other) noexcept;

  /**
   * Lock the pages in memory
   */
  bool mlock(LockMode lock);

  /**
   * Unlock the pages.
   * If dontneed is true, the kernel is instructed to release these pages
   * (per madvise(MADV_DONTNEED)).
   */
  void munlock(bool dontneed = false);

  /**
   * Hint that these pages will be scanned linearly.
   * madvise(MADV_SEQUENTIAL)
   */
  void hintLinearScan();

  /**
   * Advise the kernel about memory access.
   */
  void advise(int advice) const;
  void advise(int advice, size_t offset, size_t length) const;

  /**
   * A bitwise cast of the mapped bytes as a range of values. Only intended
   * for use with POD or in-place usable types.
   */
  template<class T>
  Range<const T*> asRange() const {
    size_t count = data_.size() / sizeof(T);
    return Range<const T*>(static_cast<const T*>(
                               static_cast<const void*>(data_.data())),
                           count);
  }

  /**
   * A range of bytes mapped by this mapping.
   */
  ByteRange range() const {
    return data_;
  }

  /**
   * A bitwise cast of the mapped bytes as a range of mutable values. Only
   * intended for use with POD or in-place usable types.
   */
  template<class T>
  Range<T*> asWritableRange() const {
    DCHECK(options_.writable); // you'll segfault anyway...
    size_t count = data_.size() / sizeof(T);
    return Range<T*>(static_cast<T*>(
                         static_cast<void*>(data_.data())),
                     count);
  }

  /**
   * A range of mutable bytes mapped by this mapping.
   */
  MutableByteRange writableRange() const {
    DCHECK(options_.writable); // you'll segfault anyway...
    return data_;
  }

  /**
   * Return the memory area where the file was mapped.
   * Deprecated; use range() instead.
   */
  StringPiece data() const {
    return asRange<const char>();
  }

  bool mlocked() const {
    return locked_;
  }

  int fd() const { return file_.fd(); }

 private:
  MemoryMapping();

  enum InitFlags {
    kGrow = 1 << 0,
    kAnon = 1 << 1,
  };
  void init(off_t offset, off_t length);

  File file_;
  void* mapStart_ = nullptr;
  off_t mapLength_ = 0;
  Options options_;
  bool locked_ = false;
  MutableByteRange data_;
};

void swap(MemoryMapping&, MemoryMapping&) noexcept;

/**
 * A special case of memcpy() that always copies memory forwards.
 * (libc's memcpy() is allowed to copy memory backwards, and will do so
 * when using SSSE3 instructions).
 *
 * Assumes src and dest are aligned to alignof(unsigned long).
 *
 * Useful when copying from/to memory mappings after hintLinearScan();
 * copying backwards renders any prefetching useless (even harmful).
 */
void alignedForwardMemcpy(void* dest, const void* src, size_t size);

/**
 * Copy a file using mmap(). Overwrites dest.
 */
void mmapFileCopy(const char* src, const char* dest, mode_t mode = 0666);

} // namespace folly
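
The @ -1,251 +0,0 @@ hunk removes the whole header: the Folly pod shipped with this upgrade no longer vendors MemoryMapping.h. For reference, the deleted API was typically used like this; the file path is illustrative:

#include <folly/MemoryMapping.h>

long sum_file_bytes() {
  folly::MemoryMapping m("/tmp/example.dat"); // read-only, maps whole file
  m.hintLinearScan(); // madvise(MADV_SEQUENTIAL)
  long sum = 0;
  for (unsigned char c : m.range()) {
    sum += c;
  }
  return sum;
}
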
ios/Pods/Folly/folly/MicroLock.h (generated; 61 changes)
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2016-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -16,11 +16,12 @@

#pragma once

#include <assert.h>
#include <cassert>
#include <climits>
#include <stdint.h>
#include <folly/detail/Futex.h>
#include <cstdint>

#include <folly/Portability.h>
#include <folly/detail/Futex.h>

#if defined(__clang__)
#define NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
@ -107,17 +108,22 @@ class MicroLockCore {
  inline uint32_t baseShift(unsigned slot) const;
  inline uint32_t heldBit(unsigned slot) const;
  inline uint32_t waitBit(unsigned slot) const;
  static void lockSlowPath(uint32_t oldWord,
                           detail::Futex<>* wordPtr,
                           uint32_t slotHeldBit,
                           unsigned maxSpins,
                           unsigned maxYields);
  static void lockSlowPath(
      uint32_t oldWord,
      detail::Futex<>* wordPtr,
      uint32_t slotHeldBit,
      unsigned maxSpins,
      unsigned maxYields);

 public:
  inline void unlock(unsigned slot) NO_SANITIZE_ADDRESS;
  inline void unlock() { unlock(0); }
  inline void unlock() {
    unlock(0);
  }
  // Initializes all the slots.
  inline void init() { lock_ = 0; }
  inline void init() {
    lock_ = 0;
  }
};

inline detail::Futex<>* MicroLockCore::word() const {
@ -156,8 +162,7 @@ void MicroLockCore::unlock(unsigned slot) {
      oldWord, newWord, std::memory_order_release, std::memory_order_relaxed));

  if (oldWord & waitBit(slot)) {
    // We don't track the number of waiters, so wake everyone
    (void)wordPtr->futexWake(std::numeric_limits<int>::max(), heldBit(slot));
    detail::futexWake(wordPtr, 1, heldBit(slot));
  }
}

@ -165,14 +170,17 @@ template <unsigned MaxSpins = 1000, unsigned MaxYields = 0>
class MicroLockBase : public MicroLockCore {
 public:
  inline void lock(unsigned slot) NO_SANITIZE_ADDRESS;
  inline void lock() { lock(0); }
  inline void lock() {
    lock(0);
  }
  inline bool try_lock(unsigned slot) NO_SANITIZE_ADDRESS;
  inline bool try_lock() { return try_lock(0); }
  inline bool try_lock() {
    return try_lock(0);
  }
};

template <unsigned MaxSpins, unsigned MaxYields>
bool MicroLockBase<MaxSpins, MaxYields>::try_lock(unsigned slot) {

  // N.B. You might think that try_lock is just the fast path of lock,
  // but you'd be wrong. Keep in mind that other parts of our host
  // word might be changing while we take the lock! We're not allowed
@ -189,27 +197,28 @@ bool MicroLockBase<MaxSpins, MaxYields>::try_lock(unsigned slot) {
    if (oldWord & heldBit(slot)) {
      return false;
    }
  } while (!wordPtr->compare_exchange_weak(oldWord,
                                           oldWord | heldBit(slot),
                                           std::memory_order_acquire,
                                           std::memory_order_relaxed));
  } while (!wordPtr->compare_exchange_weak(
      oldWord,
      oldWord | heldBit(slot),
      std::memory_order_acquire,
      std::memory_order_relaxed));

  return true;
}

template <unsigned MaxSpins, unsigned MaxYields>
void MicroLockBase<MaxSpins, MaxYields>::lock(unsigned slot) {

  static_assert(MaxSpins + MaxYields < (unsigned)-1, "overflow");

  detail::Futex<>* wordPtr = word();
  uint32_t oldWord;
  oldWord = wordPtr->load(std::memory_order_relaxed);
  if ((oldWord & heldBit(slot)) == 0 &&
      wordPtr->compare_exchange_weak(oldWord,
                                     oldWord | heldBit(slot),
                                     std::memory_order_acquire,
                                     std::memory_order_relaxed)) {
      wordPtr->compare_exchange_weak(
          oldWord,
          oldWord | heldBit(slot),
          std::memory_order_acquire,
          std::memory_order_relaxed)) {
    // Fast uncontended case: memory_order_acquire above is our barrier
  } else {
    // lockSlowPath doesn't have any slot-dependent computation; it
@ -222,4 +231,4 @@ void MicroLockBase<MaxSpins, MaxYields>::lock(unsigned slot) {
}

typedef MicroLockBase<> MicroLock;
}
} // namespace folly
|
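For reference, a minimal sketch of how the MicroLock API above is used; the surrounding struct and function names are illustrative, not from this commit:

#include <folly/MicroLock.h>

struct Entry {
  folly::MicroLock lock; // one byte; slots 0-3 share it
  int value;
  Entry() : value(0) { lock.init(); } // init() unless zero-initialized
};

void bump(Entry& e) {
  e.lock.lock(); // equivalent to e.lock.lock(0)
  ++e.value;
  e.lock.unlock();
}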
147 ios/Pods/Folly/folly/MicroSpinLock.h (generated)
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2011-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -14,147 +14,4 @@
 * limitations under the License.
 */

/*
 * N.B. You most likely do _not_ want to use MicroSpinLock or any
 * other kind of spinlock. Consider MicroLock instead.
 *
 * In short, spinlocks in preemptive multi-tasking operating systems
 * have serious problems and fast mutexes like std::mutex are almost
 * certainly the better choice, because letting the OS scheduler put a
 * thread to sleep is better for system responsiveness and throughput
 * than wasting a timeslice repeatedly querying a lock held by a
 * thread that's blocked, and you can't prevent userspace
 * programs blocking.
 *
 * Spinlocks in an operating system kernel make much more sense than
 * they do in userspace.
 */

#pragma once

/*
 * @author Keith Adams <kma@fb.com>
 * @author Jordan DeLong <delong.j@fb.com>
 */

#include <array>
#include <cinttypes>
#include <type_traits>
#include <boost/noncopyable.hpp>
#include <cstdlib>
#include <pthread.h>
#include <mutex>
#include <atomic>

#include <glog/logging.h>
#include <folly/detail/Sleeper.h>
#include <folly/Portability.h>

namespace folly {

/*
 * A really, *really* small spinlock for fine-grained locking of lots
 * of teeny-tiny data.
 *
 * Zero initializing these is guaranteed to be as good as calling
 * init(), since the free state is guaranteed to be all-bits zero.
 *
 * This class should be kept a POD, so we can used it in other packed
 * structs (gcc does not allow __attribute__((__packed__)) on structs that
 * contain non-POD data). This means avoid adding a constructor, or
 * making some members private, etc.
 */
struct MicroSpinLock {
  enum { FREE = 0, LOCKED = 1 };
  // lock_ can't be std::atomic<> to preserve POD-ness.
  uint8_t lock_;

  // Initialize this MSL. It is unnecessary to call this if you
  // zero-initialize the MicroSpinLock.
  void init() {
    payload()->store(FREE);
  }

  bool try_lock() {
    return cas(FREE, LOCKED);
  }

  void lock() {
    detail::Sleeper sleeper;
    do {
      while (payload()->load() != FREE) {
        sleeper.wait();
      }
    } while (!try_lock());
    DCHECK(payload()->load() == LOCKED);
  }

  void unlock() {
    CHECK(payload()->load() == LOCKED);
    payload()->store(FREE, std::memory_order_release);
  }

 private:
  std::atomic<uint8_t>* payload() {
    return reinterpret_cast<std::atomic<uint8_t>*>(&this->lock_);
  }

  bool cas(uint8_t compare, uint8_t newVal) {
    return std::atomic_compare_exchange_strong_explicit(payload(), &compare, newVal,
                                                        std::memory_order_acquire,
                                                        std::memory_order_relaxed);
  }
};
static_assert(
    std::is_pod<MicroSpinLock>::value,
    "MicroSpinLock must be kept a POD type.");

//////////////////////////////////////////////////////////////////////

/**
 * Array of spinlocks where each one is padded to prevent false sharing.
 * Useful for shard-based locking implementations in environments where
 * contention is unlikely.
 */

// TODO: generate it from configure (`getconf LEVEL1_DCACHE_LINESIZE`)
#define FOLLY_CACHE_LINE_SIZE 64

template <class T, size_t N>
struct FOLLY_ALIGNED_MAX SpinLockArray {
  T& operator[](size_t i) {
    return data_[i].lock;
  }

  const T& operator[](size_t i) const {
    return data_[i].lock;
  }

  constexpr size_t size() const { return N; }

 private:
  struct PaddedSpinLock {
    PaddedSpinLock() : lock() {}
    T lock;
    char padding[FOLLY_CACHE_LINE_SIZE - sizeof(T)];
  };
  static_assert(sizeof(PaddedSpinLock) == FOLLY_CACHE_LINE_SIZE,
                "Invalid size of PaddedSpinLock");

  // Check if T can theoretically cross a cache line.
  static_assert(alignof(std::max_align_t) > 0 &&
                FOLLY_CACHE_LINE_SIZE % alignof(std::max_align_t) == 0 &&
                sizeof(T) <= alignof(std::max_align_t),
                "T can cross cache line boundaries");

  char padding_[FOLLY_CACHE_LINE_SIZE];
  std::array<PaddedSpinLock, N> data_;
};

//////////////////////////////////////////////////////////////////////

typedef std::lock_guard<MicroSpinLock> MSLGuard;

//////////////////////////////////////////////////////////////////////

}
#include <folly/synchronization/MicroSpinLock.h> // @shim
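For reference, a minimal sketch of the POD spinlock above together with the MSLGuard typedef; Counter and increment are illustrative names, not from this commit:

#include <mutex>
#include <folly/MicroSpinLock.h>

struct Counter {
  folly::MicroSpinLock lock = {0}; // zero-init == FREE, same as init()
  int n = 0;
};

int increment(Counter& c) {
  std::lock_guard<folly::MicroSpinLock> g(c.lock); // aka MSLGuard
  return ++c.n;
}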
27 ios/Pods/Folly/folly/MoveWrapper.h (generated)
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2013-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -39,8 +39,7 @@ class MoveWrapper {
  MoveWrapper() = default;

  /// Move a value in.
  explicit
  MoveWrapper(T&& t) : value(std::move(t)) {}
  explicit MoveWrapper(T&& t) : value(std::move(t)) {}

  /// copy is move
  MoveWrapper(const MoveWrapper& other) : value(std::move(other.value)) {}
@ -48,14 +47,24 @@ class MoveWrapper {
  /// move is also move
  MoveWrapper(MoveWrapper&& other) : value(std::move(other.value)) {}

  const T& operator*() const { return value; }
  T& operator*() { return value; }
  const T& operator*() const {
    return value;
  }
  T& operator*() {
    return value;
  }

  const T* operator->() const { return &value; }
  T* operator->() { return &value; }
  const T* operator->() const {
    return &value;
  }
  T* operator->() {
    return &value;
  }

  /// move the value out (sugar for std::move(*moveWrapper))
  T&& move() { return std::move(value); }
  T&& move() {
    return std::move(value);
  }

  // If you want these you're probably doing it wrong, though they'd be
  // easy enough to implement
@ -74,4 +83,4 @@ MoveWrapper<T0> makeMoveWrapper(T&& t) {
  return MoveWrapper<T0>(std::forward<T0>(t));
}

} // namespace
} // namespace folly
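For reference, a sketch of the pattern MoveWrapper exists for: smuggling a move-only value into a by-copy lambda capture (the pre-C++14 workaround). The names are illustrative, not from this commit:

#include <memory>
#include <folly/MoveWrapper.h>

auto makeTask() {
  auto p = std::make_unique<int>(42);
  auto w = folly::makeMoveWrapper(std::move(p)); // copying w moves the value
  return [w]() mutable { return **w != 0; };     // captured by copy, value moved in
}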
469 ios/Pods/Folly/folly/Optional.h (generated)
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2012-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -13,7 +13,6 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

/*
@ -53,69 +52,94 @@
 *   cout << *v << endl;
 * }
 */

#include <cstddef>
#include <functional>
#include <new>
#include <stdexcept>
#include <type_traits>
#include <utility>

#include <folly/Portability.h>
#include <folly/Traits.h>
#include <folly/Utility.h>
#include <folly/lang/Exception.h>

namespace folly {

namespace detail { struct NoneHelper {}; }
template <class Value>
class Optional;

namespace detail {
struct NoneHelper {};

template <class Value>
struct OptionalPromiseReturn;
} // namespace detail

typedef int detail::NoneHelper::*None;

const None none = nullptr;
const None none = {};

class OptionalEmptyException : public std::runtime_error {
class FOLLY_EXPORT OptionalEmptyException : public std::runtime_error {
 public:
  OptionalEmptyException()
      : std::runtime_error("Empty Optional cannot be unwrapped") {}
};

template<class Value>
template <class Value>
class Optional {
 public:
  typedef Value value_type;

  static_assert(!std::is_reference<Value>::value,
                "Optional may not be used with reference types");
  static_assert(!std::is_abstract<Value>::value,
                "Optional may not be used with abstract types");
  static_assert(
      !std::is_reference<Value>::value,
      "Optional may not be used with reference types");
  static_assert(
      !std::is_abstract<Value>::value,
      "Optional may not be used with abstract types");

  Optional() noexcept {
  }

  Optional(const Optional& src)
    noexcept(std::is_nothrow_copy_constructible<Value>::value) {
  FOLLY_CPP14_CONSTEXPR Optional() noexcept {}

  Optional(const Optional& src) noexcept(
      std::is_nothrow_copy_constructible<Value>::value) {
    if (src.hasValue()) {
      construct(src.value());
    }
  }

  Optional(Optional&& src)
    noexcept(std::is_nothrow_move_constructible<Value>::value) {

  Optional(Optional&& src) noexcept(
      std::is_nothrow_move_constructible<Value>::value) {
    if (src.hasValue()) {
      construct(std::move(src.value()));
      src.clear();
    }
  }

  /* implicit */ Optional(const None&) noexcept {
  }
  FOLLY_CPP14_CONSTEXPR /* implicit */ Optional(const None&) noexcept {}

  /* implicit */ Optional(Value&& newValue)
    noexcept(std::is_nothrow_move_constructible<Value>::value) {
  FOLLY_CPP14_CONSTEXPR /* implicit */ Optional(Value&& newValue) noexcept(
      std::is_nothrow_move_constructible<Value>::value) {
    construct(std::move(newValue));
  }

  /* implicit */ Optional(const Value& newValue)
    noexcept(std::is_nothrow_copy_constructible<Value>::value) {
  FOLLY_CPP14_CONSTEXPR /* implicit */ Optional(const Value& newValue) noexcept(
      std::is_nothrow_copy_constructible<Value>::value) {
    construct(newValue);
  }

  template <typename... Args>
  FOLLY_CPP14_CONSTEXPR explicit Optional(in_place_t, Args&&... args) noexcept(
      std::is_nothrow_constructible<Value, Args...>::value) {
    construct(std::forward<Args>(args)...);
  }

  // Used only when an Optional is used with coroutines on MSVC
  /* implicit */ Optional(const detail::OptionalPromiseReturn<Value>& p)
      : Optional{} {
    p.promise_->value_ = this;
  }

  void assign(const None&) {
    clear();
  }
@ -155,57 +179,83 @@ class Optional {
    }
  }

  template<class Arg>
  template <class Arg>
  Optional& operator=(Arg&& arg) {
    assign(std::forward<Arg>(arg));
    return *this;
  }

  Optional& operator=(Optional &&other)
    noexcept (std::is_nothrow_move_assignable<Value>::value) {

  Optional& operator=(Optional&& other) noexcept(
      std::is_nothrow_move_assignable<Value>::value) {
    assign(std::move(other));
    return *this;
  }

  Optional& operator=(const Optional &other)
    noexcept (std::is_nothrow_copy_assignable<Value>::value) {

  Optional& operator=(const Optional& other) noexcept(
      std::is_nothrow_copy_assignable<Value>::value) {
    assign(other);
    return *this;
  }

  template<class... Args>
  void emplace(Args&&... args) {
  template <class... Args>
  Value& emplace(Args&&... args) {
    clear();
    construct(std::forward<Args>(args)...);
    return value();
  }

  void clear() {
  template <class U, class... Args>
  typename std::enable_if<
      std::is_constructible<Value, std::initializer_list<U>&, Args&&...>::value,
      Value&>::type
  emplace(std::initializer_list<U> ilist, Args&&... args) {
    clear();
    construct(ilist, std::forward<Args>(args)...);
    return value();
  }

  void reset() noexcept {
    storage_.clear();
  }

  const Value& value() const& {
  void clear() noexcept {
    reset();
  }

  void swap(Optional& that) noexcept(IsNothrowSwappable<Value>::value) {
    if (hasValue() && that.hasValue()) {
      using std::swap;
      swap(value(), that.value());
    } else if (hasValue()) {
      that.emplace(std::move(value()));
      reset();
    } else if (that.hasValue()) {
      emplace(std::move(that.value()));
      that.reset();
    }
  }

  FOLLY_CPP14_CONSTEXPR const Value& value() const& {
    require_value();
    return storage_.value;
  }

  Value& value() & {
  FOLLY_CPP14_CONSTEXPR Value& value() & {
    require_value();
    return storage_.value;
  }

  Value&& value() && {
  FOLLY_CPP14_CONSTEXPR Value&& value() && {
    require_value();
    return std::move(storage_.value);
  }

  const Value&& value() const&& {
  FOLLY_CPP14_CONSTEXPR const Value&& value() const&& {
    require_value();
    return std::move(storage_.value);
  }

  const Value* get_pointer() const& {
  const Value* get_pointer() const& {
    return storage_.hasValue ? &storage_.value : nullptr;
  }
  Value* get_pointer() & {
@ -213,23 +263,41 @@ class Optional {
  }
  Value* get_pointer() && = delete;

  bool hasValue() const { return storage_.hasValue; }

  explicit operator bool() const {
    return hasValue();
  FOLLY_CPP14_CONSTEXPR bool has_value() const noexcept {
    return storage_.hasValue;
  }

  const Value& operator*() const& { return value(); }
  Value& operator*() & { return value(); }
  const Value&& operator*() const&& { return std::move(value()); }
  Value&& operator*() && { return std::move(value()); }
  FOLLY_CPP14_CONSTEXPR bool hasValue() const noexcept {
    return has_value();
  }

  const Value* operator->() const { return &value(); }
  Value* operator->() { return &value(); }
  FOLLY_CPP14_CONSTEXPR explicit operator bool() const noexcept {
    return has_value();
  }

  FOLLY_CPP14_CONSTEXPR const Value& operator*() const& {
    return value();
  }
  FOLLY_CPP14_CONSTEXPR Value& operator*() & {
    return value();
  }
  FOLLY_CPP14_CONSTEXPR const Value&& operator*() const&& {
    return std::move(value());
  }
  FOLLY_CPP14_CONSTEXPR Value&& operator*() && {
    return std::move(value());
  }

  FOLLY_CPP14_CONSTEXPR const Value* operator->() const {
    return &value();
  }
  FOLLY_CPP14_CONSTEXPR Value* operator->() {
    return &value();
  }

  // Return a copy of the value if set, or a given default if not.
  template <class U>
  Value value_or(U&& dflt) const& {
  FOLLY_CPP14_CONSTEXPR Value value_or(U&& dflt) const& {
    if (storage_.hasValue) {
      return storage_.value;
    }
@ -238,7 +306,7 @@ class Optional {
  }

  template <class U>
  Value value_or(U&& dflt) && {
  FOLLY_CPP14_CONSTEXPR Value value_or(U&& dflt) && {
    if (storage_.hasValue) {
      return std::move(storage_.value);
    }
@ -249,53 +317,49 @@ class Optional {
 private:
  void require_value() const {
    if (!storage_.hasValue) {
      throw OptionalEmptyException();
      throw_exception<OptionalEmptyException>();
    }
  }

  template<class... Args>
  template <class... Args>
  void construct(Args&&... args) {
    const void* ptr = &storage_.value;
    // for supporting const types
    new(const_cast<void*>(ptr)) Value(std::forward<Args>(args)...);
    // For supporting const types.
    new (const_cast<void*>(ptr)) Value(std::forward<Args>(args)...);
    storage_.hasValue = true;
  }

  struct StorageTriviallyDestructible {
    // The union trick allows to initialize the Optional's memory,
    // so that compiler/tools don't complain about unitialized memory,
    // without actually calling Value's default constructor.
    // The rest of the implementation enforces that hasValue/value are
    // synchronized.
    union {
      bool hasValue;
      struct {
        bool paddingForHasValue_[1];
        Value value;
      };
      char emptyState;
      Value value;
    };
    bool hasValue;

    StorageTriviallyDestructible() : hasValue{false} {}

    void clear() {
      hasValue = false;
    }
  };

  struct StorageNonTriviallyDestructible {
    // See StorageTriviallyDestructible's union
    union {
      bool hasValue;
      struct {
        bool paddingForHasValue_[1];
        Value value;
      };
      char emptyState;
      Value value;
    };
    bool hasValue;

    FOLLY_PUSH_WARNING
    // These are both informational warnings, but they trigger rare
    // enough that we've left them enabled. Needed as long as MSVC
    // 2015 is supported.
    FOLLY_MSVC_DISABLE_WARNING(4587) // constructor of .value is not called
    FOLLY_MSVC_DISABLE_WARNING(4588) // destructor of .value is not called
    StorageNonTriviallyDestructible() : hasValue{false} {}
    ~StorageNonTriviallyDestructible() {
      clear();
    }
    FOLLY_POP_WARNING

    void clear() {
      if (hasValue) {
@ -305,108 +369,269 @@ class Optional {
    }
  };

  using Storage =
      typename std::conditional<std::is_trivially_destructible<Value>::value,
                                StorageTriviallyDestructible,
                                StorageNonTriviallyDestructible>::type;
  using Storage = typename std::conditional<
      std::is_trivially_destructible<Value>::value,
      StorageTriviallyDestructible,
      StorageNonTriviallyDestructible>::type;

  Storage storage_;
};

template<class T>
template <class T>
const T* get_pointer(const Optional<T>& opt) {
  return opt.get_pointer();
}

template<class T>
template <class T>
T* get_pointer(Optional<T>& opt) {
  return opt.get_pointer();
}

template<class T>
void swap(Optional<T>& a, Optional<T>& b) {
  if (a.hasValue() && b.hasValue()) {
    // both full
    using std::swap;
    swap(a.value(), b.value());
  } else if (a.hasValue() || b.hasValue()) {
    std::swap(a, b); // fall back to default implementation if they're mixed.
  }
template <class T>
void swap(Optional<T>& a, Optional<T>& b) noexcept(noexcept(a.swap(b))) {
  a.swap(b);
}

template<class T,
         class Opt = Optional<typename std::decay<T>::type>>
Opt make_optional(T&& v) {
template <class T, class Opt = Optional<typename std::decay<T>::type>>
constexpr Opt make_optional(T&& v) {
  return Opt(std::forward<T>(v));
}

///////////////////////////////////////////////////////////////////////////////
// Comparisons.

template<class V>
bool operator==(const Optional<V>& a, const V& b) {
template <class U, class V>
constexpr bool operator==(const Optional<U>& a, const V& b) {
  return a.hasValue() && a.value() == b;
}

template<class V>
bool operator!=(const Optional<V>& a, const V& b) {
template <class U, class V>
constexpr bool operator!=(const Optional<U>& a, const V& b) {
  return !(a == b);
}

template<class V>
bool operator==(const V& a, const Optional<V>& b) {
template <class U, class V>
constexpr bool operator==(const U& a, const Optional<V>& b) {
  return b.hasValue() && b.value() == a;
}

template<class V>
bool operator!=(const V& a, const Optional<V>& b) {
template <class U, class V>
constexpr bool operator!=(const U& a, const Optional<V>& b) {
  return !(a == b);
}

template<class V>
bool operator==(const Optional<V>& a, const Optional<V>& b) {
  if (a.hasValue() != b.hasValue()) { return false; }
  if (a.hasValue()) { return a.value() == b.value(); }
template <class U, class V>
FOLLY_CPP14_CONSTEXPR bool operator==(
    const Optional<U>& a,
    const Optional<V>& b) {
  if (a.hasValue() != b.hasValue()) {
    return false;
  }
  if (a.hasValue()) {
    return a.value() == b.value();
  }
  return true;
}

template<class V>
bool operator!=(const Optional<V>& a, const Optional<V>& b) {
template <class U, class V>
constexpr bool operator!=(const Optional<U>& a, const Optional<V>& b) {
  return !(a == b);
}

template<class V>
bool operator< (const Optional<V>& a, const Optional<V>& b) {
  if (a.hasValue() != b.hasValue()) { return a.hasValue() < b.hasValue(); }
  if (a.hasValue()) { return a.value() < b.value(); }
template <class U, class V>
FOLLY_CPP14_CONSTEXPR bool operator<(
    const Optional<U>& a,
    const Optional<V>& b) {
  if (a.hasValue() != b.hasValue()) {
    return a.hasValue() < b.hasValue();
  }
  if (a.hasValue()) {
    return a.value() < b.value();
  }
  return false;
}

template<class V>
bool operator> (const Optional<V>& a, const Optional<V>& b) {
template <class U, class V>
constexpr bool operator>(const Optional<U>& a, const Optional<V>& b) {
  return b < a;
}

template<class V>
bool operator<=(const Optional<V>& a, const Optional<V>& b) {
template <class U, class V>
constexpr bool operator<=(const Optional<U>& a, const Optional<V>& b) {
  return !(b < a);
}

template<class V>
bool operator>=(const Optional<V>& a, const Optional<V>& b) {
template <class U, class V>
constexpr bool operator>=(const Optional<U>& a, const Optional<V>& b) {
  return !(a < b);
}

// Suppress comparability of Optional<T> with T, despite implicit conversion.
template<class V> bool operator< (const Optional<V>&, const V& other) = delete;
template<class V> bool operator<=(const Optional<V>&, const V& other) = delete;
template<class V> bool operator>=(const Optional<V>&, const V& other) = delete;
template<class V> bool operator> (const Optional<V>&, const V& other) = delete;
template<class V> bool operator< (const V& other, const Optional<V>&) = delete;
template<class V> bool operator<=(const V& other, const Optional<V>&) = delete;
template<class V> bool operator>=(const V& other, const Optional<V>&) = delete;
template<class V> bool operator> (const V& other, const Optional<V>&) = delete;
template <class V>
bool operator<(const Optional<V>&, const V& other) = delete;
template <class V>
bool operator<=(const Optional<V>&, const V& other) = delete;
template <class V>
bool operator>=(const Optional<V>&, const V& other) = delete;
template <class V>
bool operator>(const Optional<V>&, const V& other) = delete;
template <class V>
bool operator<(const V& other, const Optional<V>&) = delete;
template <class V>
bool operator<=(const V& other, const Optional<V>&) = delete;
template <class V>
bool operator>=(const V& other, const Optional<V>&) = delete;
template <class V>
bool operator>(const V& other, const Optional<V>&) = delete;

// Comparisons with none
template <class V>
constexpr bool operator==(const Optional<V>& a, None) noexcept {
  return !a.hasValue();
}
template <class V>
constexpr bool operator==(None, const Optional<V>& a) noexcept {
  return !a.hasValue();
}
template <class V>
constexpr bool operator<(const Optional<V>&, None) noexcept {
  return false;
}
template <class V>
constexpr bool operator<(None, const Optional<V>& a) noexcept {
  return a.hasValue();
}
template <class V>
constexpr bool operator>(const Optional<V>& a, None) noexcept {
  return a.hasValue();
}
template <class V>
constexpr bool operator>(None, const Optional<V>&) noexcept {
  return false;
}
template <class V>
constexpr bool operator<=(None, const Optional<V>&) noexcept {
  return true;
}
template <class V>
constexpr bool operator<=(const Optional<V>& a, None) noexcept {
  return !a.hasValue();
}
template <class V>
constexpr bool operator>=(const Optional<V>&, None) noexcept {
  return true;
}
template <class V>
constexpr bool operator>=(None, const Optional<V>& a) noexcept {
  return !a.hasValue();
}

///////////////////////////////////////////////////////////////////////////////

} // namespace folly

// Allow usage of Optional<T> in std::unordered_map and std::unordered_set
FOLLY_NAMESPACE_STD_BEGIN
template <class T>
struct hash<folly::Optional<T>> {
  size_t operator()(folly::Optional<T> const& obj) const {
    if (!obj.hasValue()) {
      return 0;
    }
    return hash<typename remove_const<T>::type>()(*obj);
  }
};
FOLLY_NAMESPACE_STD_END

// Enable the use of folly::Optional with `co_await`
// Inspired by https://github.com/toby-allsopp/coroutine_monad
#if FOLLY_HAS_COROUTINES
#include <experimental/coroutine>

namespace folly {
namespace detail {
template <typename Value>
struct OptionalPromise;

template <typename Value>
struct OptionalPromiseReturn {
  Optional<Value> storage_;
  OptionalPromise<Value>* promise_;
  /* implicit */ OptionalPromiseReturn(OptionalPromise<Value>& promise) noexcept
      : promise_(&promise) {
    promise.value_ = &storage_;
  }
  OptionalPromiseReturn(OptionalPromiseReturn&& that) noexcept
      : OptionalPromiseReturn{*that.promise_} {}
  ~OptionalPromiseReturn() {}
  /* implicit */ operator Optional<Value>() & {
    return std::move(storage_);
  }
};

template <typename Value>
struct OptionalPromise {
  Optional<Value>* value_ = nullptr;
  OptionalPromise() = default;
  OptionalPromise(OptionalPromise const&) = delete;
  // This should work regardless of whether the compiler generates:
  //    folly::Optional<Value> retobj{ p.get_return_object(); } // MSVC
  // or:
  //    auto retobj = p.get_return_object(); // clang
  OptionalPromiseReturn<Value> get_return_object() noexcept {
    return *this;
  }
  std::experimental::suspend_never initial_suspend() const noexcept {
    return {};
  }
  std::experimental::suspend_never final_suspend() const {
    return {};
  }
  template <typename U>
  void return_value(U&& u) {
    *value_ = static_cast<U&&>(u);
  }
  void unhandled_exception() {
    // Technically, throwing from unhandled_exception is underspecified:
    // https://github.com/GorNishanov/CoroutineWording/issues/17
    throw;
  }
};

template <typename Value>
struct OptionalAwaitable {
  Optional<Value> o_;
  bool await_ready() const noexcept {
    return o_.hasValue();
  }
  Value await_resume() {
    return std::move(o_.value());
  }

  // Explicitly only allow suspension into an OptionalPromise
  template <typename U>
  void await_suspend(
      std::experimental::coroutine_handle<OptionalPromise<U>> h) const {
    // Abort the rest of the coroutine. resume() is not going to be called
    h.destroy();
  }
};
} // namespace detail

template <typename Value>
detail::OptionalAwaitable<Value>
/* implicit */ operator co_await(Optional<Value> o) {
  return {std::move(o)};
}
} // namespace folly

// This makes folly::Optional<Value> useable as a coroutine return type..
namespace std {
namespace experimental {
template <typename Value, typename... Args>
struct coroutine_traits<folly::Optional<Value>, Args...> {
  using promise_type = folly::detail::OptionalPromise<Value>;
};
} // namespace experimental
} // namespace std
#endif // FOLLY_HAS_COROUTINES
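For reference, a short sketch of the folly::Optional surface after this update, including the emplace() overload that now returns Value& and the none sentinel; parsePositive and demo are illustrative names, not from this commit:

#include <folly/Optional.h>

folly::Optional<int> parsePositive(int raw) {
  if (raw <= 0) {
    return folly::none; // empty; compares equal to none
  }
  return folly::Optional<int>(raw);
}

int demo() {
  auto v = parsePositive(7);
  int& ref = v.emplace(9); // emplace() now returns a reference
  (void)ref;
  return v.value_or(0);    // 9 here; 0 if v were empty
}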
79 ios/Pods/Folly/folly/Overload.h (generated, new file)
@ -0,0 +1,79 @@
/*
 * Copyright 2017-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <type_traits>
#include <utility>

/**
 * folly implementation of `std::overload` like functionality
 *
 * Example:
 *  struct One {};
 *  struct Two {};
 *  boost::variant<One, Two> value;
 *
 *  variant_match(value,
 *    [] (const One& one) { ... },
 *    [] (const Two& two) { ... });
 */

namespace folly {

namespace detail {
template <typename...>
struct Overload;

template <typename Case, typename... Cases>
struct Overload<Case, Cases...> : Overload<Cases...>, Case {
  Overload(Case c, Cases... cs)
      : Overload<Cases...>(std::move(cs)...), Case(std::move(c)) {}

  using Case::operator();
  using Overload<Cases...>::operator();
};

template <typename Case>
struct Overload<Case> : Case {
  explicit Overload(Case c) : Case(std::move(c)) {}

  using Case::operator();
};
} // namespace detail

/*
 * Combine multiple `Cases` in one function object
 */
template <typename... Cases>
decltype(auto) overload(Cases&&... cases) {
  return detail::Overload<typename std::decay<Cases>::type...>{
      std::forward<Cases>(cases)...};
}

/*
 * Match `Variant` with one of the `Cases`
 *
 * Note: you can also use `[] (const auto&) {...}` as default case
 *
 */
template <typename Variant, typename... Cases>
decltype(auto) variant_match(Variant&& variant, Cases&&... cases) {
  return apply_visitor(
      overload(std::forward<Cases>(cases)...), std::forward<Variant>(variant));
}

} // namespace folly
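For reference, a compilable sketch along the lines of the doc comment above, assuming a Boost recent enough for lambda visitors via apply_visitor; describe is an illustrative name, not from this commit:

#include <string>
#include <boost/variant.hpp>
#include <folly/Overload.h>

std::string describe(const boost::variant<int, std::string>& v) {
  return folly::variant_match(
      v,
      [](int i) { return std::to_string(i); },
      [](const std::string& s) { return s; });
}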
59 ios/Pods/Folly/folly/PackedSyncPtr.h (generated)
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2011-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -16,10 +16,15 @@

#pragma once

#include <folly/Portability.h>
#include <type_traits>

#if !FOLLY_X64 && !FOLLY_PPC64
# error "PackedSyncPtr is x64 and ppc64 specific code."
#include <glog/logging.h>

#include <folly/Portability.h>
#include <folly/synchronization/SmallLocks.h>

#if !FOLLY_X64 && !FOLLY_PPC64 && !FOLLY_AARCH64
#error "PackedSyncPtr is x64, ppc64 or aarch64 specific code."
#endif

/*
@ -52,20 +57,16 @@
 * @author Jordan DeLong <delong.j@fb.com>
 */

#include <folly/SmallLocks.h>
#include <type_traits>
#include <glog/logging.h>

namespace folly {

template<class T>
template <class T>
class PackedSyncPtr {
  // This just allows using this class even with T=void. Attempting
  // to use the operator* or operator[] on a PackedSyncPtr<void> will
  // still properly result in a compile error.
  typedef typename std::add_lvalue_reference<T>::type reference;

public:
 public:
  /*
   * If you default construct one of these, you must call this init()
   * function before using it.
@ -73,7 +74,7 @@ public:
   * (We are avoiding a constructor to ensure gcc allows us to put
   * this class in packed structures.)
   */
  void init(T* initialPtr = 0, uint16_t initialExtra = 0) {
  void init(T* initialPtr = nullptr, uint16_t initialExtra = 0) {
    auto intPtr = reinterpret_cast<uintptr_t>(initialPtr);
    CHECK(!(intPtr >> 48));
    data_.init(intPtr);
@ -102,15 +103,27 @@ public:
  T* get() const {
    return reinterpret_cast<T*>(data_.getData() & (-1ull >> 16));
  }
  T* operator->() const { return get(); }
  reference operator*() const { return *get(); }
  reference operator[](std::ptrdiff_t i) const { return get()[i]; }
  T* operator->() const {
    return get();
  }
  reference operator*() const {
    return *get();
  }
  reference operator[](std::ptrdiff_t i) const {
    return get()[i];
  }

  // Synchronization (logically const, even though this mutates our
  // locked state: you can lock a const PackedSyncPtr<T> to read it).
  void lock() const { data_.lock(); }
  void unlock() const { data_.unlock(); }
  bool try_lock() const { return data_.try_lock(); }
  void lock() const {
    data_.lock();
  }
  void unlock() const {
    data_.unlock();
  }
  bool try_lock() const {
    return data_.try_lock();
  }

  /*
   * Access extra data stored in unused bytes of the pointer.
@ -140,8 +153,14 @@ public:
static_assert(
    std::is_pod<PackedSyncPtr<void>>::value,
    "PackedSyncPtr must be kept a POD type.");
static_assert(sizeof(PackedSyncPtr<void>) == 8,
              "PackedSyncPtr should be only 8 bytes---something is "
              "messed up");
static_assert(
    sizeof(PackedSyncPtr<void>) == 8,
    "PackedSyncPtr should be only 8 bytes---something is "
    "messed up");

template <typename T>
std::ostream& operator<<(std::ostream& os, const PackedSyncPtr<T>& ptr) {
  os << "PackedSyncPtr(" << ptr.get() << ", " << ptr.extra() << ")";
  return os;
}
} // namespace folly
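For reference, a minimal sketch of the API above: PackedSyncPtr packs a 48-bit pointer, a 16-bit extra field, and a lock bit into one 8-byte word. Slot and fill are illustrative names, not from this commit:

#include <folly/PackedSyncPtr.h>

struct Slot {
  folly::PackedSyncPtr<int> p; // POD: call init() instead of a constructor
};

void fill(Slot& s, int* data, uint16_t count) {
  s.p.init(data, count); // pointer plus 16 bits of "extra" (here: a length)
  s.p.lock();            // spin-lock bit embedded in the same word
  s.p[0] = 1;            // operator[] indexes the pointee array
  s.p.unlock();
}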
205 ios/Pods/Folly/folly/Padded.h (generated)
@ -1,5 +1,5 @@
/*
 * Copyright 2016 Facebook, Inc.
 * Copyright 2012-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -28,7 +28,7 @@
#include <boost/iterator/iterator_adaptor.hpp>

#include <folly/Portability.h>
#include <folly/ContainerTraits.h>
#include <folly/Traits.h>

/**
 * Code that aids in storing data aligned on block (possibly cache-line)
@ -53,32 +53,38 @@ namespace padded {
 * is intentional: Node itself is trivial, which means that it can be
 * serialized / deserialized using a simple memcpy.
 */
template <class T, size_t NS, class Enable=void>
template <class T, size_t NS, class Enable = void>
class Node;

namespace detail {
// Shortcut to avoid writing the long enable_if expression every time
template <class T, size_t NS, class Enable=void> struct NodeValid;
template <class T, size_t NS, class Enable = void>
struct NodeValid;
template <class T, size_t NS>
struct NodeValid<T, NS,
                 typename std::enable_if<(
                     std::is_trivial<T>::value &&
                     sizeof(T) <= NS &&
                     NS % alignof(T) == 0)>::type> {
struct NodeValid<
    T,
    NS,
    typename std::enable_if<(
        std::is_trivial<T>::value && sizeof(T) <= NS &&
        NS % alignof(T) == 0)>::type> {
  typedef void type;
};
} // namespace detail
} // namespace detail

template <class T, size_t NS>
class Node<T, NS, typename detail::NodeValid<T,NS>::type> {
class Node<T, NS, typename detail::NodeValid<T, NS>::type> {
 public:
  typedef T value_type;
  static constexpr size_t kNodeSize = NS;
  static constexpr size_t kElementCount = NS / sizeof(T);
  static constexpr size_t kPaddingBytes = NS % sizeof(T);

  T* data() { return storage_.data; }
  const T* data() const { return storage_.data; }
  T* data() {
    return storage_.data;
  }
  const T* data() const {
    return storage_.data;
  }

  bool operator==(const Node& other) const {
    return memcmp(data(), other.data(), sizeof(T) * kElementCount) == 0;
@ -109,9 +115,10 @@ class Node<T, NS, typename detail::NodeValid<T,NS>::type> {
   * the last node is not included in the result.
   */
  static constexpr size_t paddingBytes(size_t n) {
    return (n ? (kPaddingBytes +
                 (kElementCount - 1 - (n-1) % kElementCount) * sizeof(T)) :
            0);
    return (
        n ? (kPaddingBytes +
             (kElementCount - 1 - (n - 1) % kElementCount) * sizeof(T))
          : 0);
  }

  /**
@ -134,38 +141,73 @@ class Node<T, NS, typename detail::NodeValid<T,NS>::type> {

// We must define kElementCount and kPaddingBytes to work around a bug
// in gtest that odr-uses them.
template <class T, size_t NS> constexpr size_t
Node<T, NS, typename detail::NodeValid<T,NS>::type>::kNodeSize;
template <class T, size_t NS> constexpr size_t
Node<T, NS, typename detail::NodeValid<T,NS>::type>::kElementCount;
template <class T, size_t NS> constexpr size_t
Node<T, NS, typename detail::NodeValid<T,NS>::type>::kPaddingBytes;
template <class T, size_t NS>
constexpr size_t
    Node<T, NS, typename detail::NodeValid<T, NS>::type>::kNodeSize;
template <class T, size_t NS>
constexpr size_t
    Node<T, NS, typename detail::NodeValid<T, NS>::type>::kElementCount;
template <class T, size_t NS>
constexpr size_t
    Node<T, NS, typename detail::NodeValid<T, NS>::type>::kPaddingBytes;

template <class Iter> class Iterator;
template <class Iter>
class Iterator;

namespace detail {

template <typename Void, typename Container, typename... Args>
struct padded_emplace_back_or_push_back_ {
  static decltype(auto) go(Container& container, Args&&... args) {
    using Value = typename Container::value_type;
    return container.push_back(Value(std::forward<Args>(args)...));
  }
};

template <typename Container, typename... Args>
struct padded_emplace_back_or_push_back_<
    void_t<decltype(
        std::declval<Container&>().emplace_back(std::declval<Args>()...))>,
    Container,
    Args...> {
  static decltype(auto) go(Container& container, Args&&... args) {
    return container.emplace_back(std::forward<Args>(args)...);
  }
};

template <typename Container, typename... Args>
decltype(auto) padded_emplace_back_or_push_back(
    Container& container,
    Args&&... args) {
  using impl = padded_emplace_back_or_push_back_<void, Container, Args...>;
  return impl::go(container, std::forward<Args>(args)...);
}

// Helper class to transfer the constness from From (a lvalue reference)
// and create a lvalue reference to To.
//
// TransferReferenceConstness<const string&, int> -> const int&
// TransferReferenceConstness<string&, int> -> int&
// TransferReferenceConstness<string&, const int> -> const int&
template <class From, class To, class Enable=void>
template <class From, class To, class Enable = void>
struct TransferReferenceConstness;

template <class From, class To>
struct TransferReferenceConstness<
    From, To, typename std::enable_if<std::is_const<
        typename std::remove_reference<From>::type>::value>::type> {
    From,
    To,
    typename std::enable_if<std::is_const<
        typename std::remove_reference<From>::type>::value>::type> {
  typedef typename std::add_lvalue_reference<
      typename std::add_const<To>::type>::type type;
      typename std::add_const<To>::type>::type type;
};

template <class From, class To>
struct TransferReferenceConstness<
    From, To, typename std::enable_if<!std::is_const<
        typename std::remove_reference<From>::type>::value>::type> {
    From,
    To,
    typename std::enable_if<!std::is_const<
        typename std::remove_reference<From>::type>::value>::type> {
  typedef typename std::add_lvalue_reference<To>::type type;
};

@ -174,23 +216,22 @@ struct TransferReferenceConstness<
template <class Iter>
struct IteratorBase {
  typedef boost::iterator_adaptor<
      // CRTC
      Iterator<Iter>,
      // Base iterator type
      Iter,
      // Value type
      typename std::iterator_traits<Iter>::value_type::value_type,
      // Category or traversal
      boost::use_default,
      // Reference type
      typename detail::TransferReferenceConstness<
          typename std::iterator_traits<Iter>::reference,
          typename std::iterator_traits<Iter>::value_type::value_type
      >::type
  > type;
      // CRTC
      Iterator<Iter>,
      // Base iterator type
      Iter,
      // Value type
      typename std::iterator_traits<Iter>::value_type::value_type,
      // Category or traversal
      boost::use_default,
      // Reference type
      typename detail::TransferReferenceConstness<
          typename std::iterator_traits<Iter>::reference,
          typename std::iterator_traits<Iter>::value_type::value_type>::type>
      type;
};

} // namespace detail
} // namespace detail

/**
 * Wrapper around iterators to Node to return iterators to the underlying
@ -199,19 +240,21 @@ struct IteratorBase {
template <class Iter>
class Iterator : public detail::IteratorBase<Iter>::type {
  typedef typename detail::IteratorBase<Iter>::type Super;

 public:
  typedef typename std::iterator_traits<Iter>::value_type Node;

  Iterator() : pos_(0) { }
  Iterator() : pos_(0) {}

  explicit Iterator(Iter base)
    : Super(base),
      pos_(0) {
  }
  explicit Iterator(Iter base) : Super(base), pos_(0) {}

  // Return the current node and the position inside the node
  const Node& node() const { return *this->base_reference(); }
  size_t pos() const { return pos_; }
  const Node& node() const {
    return *this->base_reference();
  }
  size_t pos() const {
    return pos_;
  }

 private:
  typename Super::reference dereference() const {
@ -219,12 +262,12 @@ class Iterator : public detail::IteratorBase<Iter>::type {
  }

  bool equal(const Iterator& other) const {
    return (this->base_reference() == other.base_reference() &&
            pos_ == other.pos_);
    return (
        this->base_reference() == other.base_reference() && pos_ == other.pos_);
  }

  void advance(typename Super::difference_type n) {
    constexpr ssize_t elementCount = Node::kElementCount;  // signed!
    constexpr ssize_t elementCount = Node::kElementCount; // signed!
    ssize_t newPos = pos_ + n;
    if (newPos >= 0 && newPos < elementCount) {
      pos_ = newPos;
@ -233,7 +276,7 @@ class Iterator : public detail::IteratorBase<Iter>::type {
    ssize_t nblocks = newPos / elementCount;
    newPos %= elementCount;
    if (newPos < 0) {
      --nblocks;  // negative
      --nblocks; // negative
      newPos += elementCount;
    }
    this->base_reference() += nblocks;
@ -255,14 +298,14 @@ class Iterator : public detail::IteratorBase<Iter>::type {
  }

  typename Super::difference_type distance_to(const Iterator& other) const {
    constexpr ssize_t elementCount = Node::kElementCount;  // signed!
    constexpr ssize_t elementCount = Node::kElementCount; // signed!
    ssize_t nblocks =
      std::distance(this->base_reference(), other.base_reference());
        std::distance(this->base_reference(), other.base_reference());
    return nblocks * elementCount + (other.pos_ - pos_);
  }

  friend class boost::iterator_core_access;
  ssize_t pos_;  // signed for easier advance() implementation
  ssize_t pos_; // signed for easier advance() implementation
};

/**
@ -339,13 +382,11 @@ class Adaptor {

  static constexpr size_t kElementsPerNode = Node::kElementCount;
  // Constructors
  Adaptor() : lastCount_(Node::kElementCount) { }
  explicit Adaptor(Container c, size_t lastCount=Node::kElementCount)
    : c_(std::move(c)),
      lastCount_(lastCount) {
  }
  Adaptor() : lastCount_(Node::kElementCount) {}
  explicit Adaptor(Container c, size_t lastCount = Node::kElementCount)
      : c_(std::move(c)), lastCount_(lastCount) {}
  explicit Adaptor(size_t n, const value_type& value = value_type())
    : c_(Node::nodeCount(n), fullNode(value)) {
      : c_(Node::nodeCount(n), fullNode(value)) {
    const auto count = n % Node::kElementCount;
    lastCount_ = count != 0 ? count : Node::kElementCount;
  }
@ -353,8 +394,7 @@ class Adaptor {
  Adaptor(const Adaptor&) = default;
  Adaptor& operator=(const Adaptor&) = default;
  Adaptor(Adaptor&& other) noexcept
    : c_(std::move(other.c_)),
      lastCount_(other.lastCount_) {
      : c_(std::move(other.c_)), lastCount_(other.lastCount_) {
    other.lastCount_ = Node::kElementCount;
  }
  Adaptor& operator=(Adaptor&& other) {
@ -377,15 +417,19 @@ class Adaptor {
    }
    return it;
  }
  const_iterator begin() const { return cbegin(); }
  const_iterator end() const { return cend(); }
  const_iterator begin() const {
    return cbegin();
  }
  const_iterator end() const {
    return cend();
  }
  iterator begin() {
    return iterator(c_.begin());
  }
  iterator end() {
    auto it = iterator(c_.end());
    if (lastCount_ != Node::kElementCount) {
      it -= (Node::kElementCount - lastCount_);
      it -= difference_type(Node::kElementCount - lastCount_);
    }
    return it;
  }
@ -398,14 +442,15 @@ class Adaptor {
    return c_.empty();
  }
  size_type size() const {
    return (c_.empty() ? 0 :
            (c_.size() - 1) * Node::kElementCount + lastCount_);
    return (
        c_.empty() ? 0 : (c_.size() - 1) * Node::kElementCount + lastCount_);
  }
  size_type max_size() const {
    return ((c_.max_size() <= std::numeric_limits<size_type>::max() /
             Node::kElementCount) ?
            c_.max_size() * Node::kElementCount :
            std::numeric_limits<size_type>::max());
    return (
        (c_.max_size() <=
         std::numeric_limits<size_type>::max() / Node::kElementCount)
            ? c_.max_size() * Node::kElementCount
            : std::numeric_limits<size_type>::max());
  }

  const value_type& front() const {
@ -495,7 +540,7 @@ class Adaptor {
 private:
  value_type* allocate_back() {
    if (lastCount_ == Node::kElementCount) {
      container_emplace_back_or_push_back(c_);
      detail::padded_emplace_back_or_push_back(c_);
      lastCount_ = 0;
    }
    return &c_.back().data()[lastCount_++];
@ -506,9 +551,9 @@ class Adaptor {
    std::fill(n.data(), n.data() + kElementsPerNode, value);
    return n;
  }
  Container c_;  // container of Nodes
  size_t lastCount_;  // number of elements in last Node
  Container c_; // container of Nodes
  size_t lastCount_; // number of elements in last Node
};

} // namespace padded
} // namespace folly
} // namespace padded
} // namespace folly
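For reference, a sketch of storing ints in cache-line-sized padded::Node blocks behind the Adaptor wrapper above; it assumes Adaptor exposes push_back (which routes through the new detail::padded_emplace_back_or_push_back), and the type aliases are illustrative:

#include <vector>
#include <folly/Padded.h>

using Blocks = std::vector<folly::padded::Node<int, 64>>; // 64-byte nodes
using PaddedInts = folly::padded::Adaptor<Blocks>;

size_t demo() {
  PaddedInts v;
  v.push_back(1);
  v.push_back(2);
  return v.size(); // counts elements, not nodes
}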
271 ios/Pods/Folly/folly/PicoSpinLock.h (generated, deleted)
@ -1,271 +0,0 @@
|
||||
/*
|
||||
* Copyright 2016 Facebook, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
* N.B. You most likely do _not_ want to use PicoSpinLock or any other
|
||||
* kind of spinlock. Consider MicroLock instead.
|
||||
*
|
||||
* In short, spinlocks in preemptive multi-tasking operating systems
|
||||
* have serious problems and fast mutexes like std::mutex are almost
|
||||
* certainly the better choice, because letting the OS scheduler put a
|
||||
* thread to sleep is better for system responsiveness and throughput
|
||||
* than wasting a timeslice repeatedly querying a lock held by a
|
||||
* thread that's blocked, and you can't prevent userspace
|
||||
* programs blocking.
|
||||
*
|
||||
* Spinlocks in an operating system kernel make much more sense than
|
||||
* they do in userspace.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
#define FOLLY_PICO_SPIN_LOCK_H_
|
||||
|
||||
/*
|
||||
* @author Keith Adams <kma@fb.com>
|
||||
* @author Jordan DeLong <delong.j@fb.com>
|
||||
*/
|
||||
|
||||
#include <array>
|
||||
#include <atomic>
|
||||
#include <cinttypes>
|
||||
#include <cstdlib>
|
||||
#include <folly/Portability.h>
|
||||
#include <mutex>
|
||||
#include <pthread.h>
|
||||
#include <type_traits>
|
||||
|
||||
#include <glog/logging.h>
|
||||
#include <folly/detail/Sleeper.h>
|
||||
|
||||
#if !FOLLY_X64 && !FOLLY_A64 && !FOLLY_PPC64
|
||||
# error "PicoSpinLock.h is currently x64, aarch64 and ppc64 only."
|
||||
#endif
|
||||
|
||||
namespace folly {
|
||||
|
||||
/*
|
||||
* Spin lock on a single bit in an integral type. You can use this
|
||||
* with 16, 32, or 64-bit integral types.
|
||||
*
|
||||
* This is useful if you want a small lock and already have an int
|
||||
* with a bit in it that you aren't using. But note that it can't be
|
||||
* as small as MicroSpinLock (1 byte), if you don't already have a
|
||||
* convenient int with an unused bit lying around to put it on.
|
||||
*
|
||||
* To construct these, either use init() or zero initialize. We don't
|
||||
* have a real constructor because we want this to be a POD type so we
|
||||
* can put it into packed structs.
|
||||
*/
|
||||
template<class IntType, int Bit = sizeof(IntType) * 8 - 1>
|
||||
struct PicoSpinLock {
|
||||
// Internally we deal with the unsigned version of the type.
|
||||
typedef typename std::make_unsigned<IntType>::type UIntType;
|
||||
|
||||
static_assert(std::is_integral<IntType>::value,
|
||||
"PicoSpinLock needs an integral type");
|
||||
static_assert(sizeof(IntType) == 2 || sizeof(IntType) == 4 ||
|
||||
sizeof(IntType) == 8,
|
||||
"PicoSpinLock can't work on integers smaller than 2 bytes");
|
||||
|
||||
public:
|
||||
static const UIntType kLockBitMask_ = UIntType(1) << Bit;
|
||||
UIntType lock_;
|
||||
|
||||
/*
|
||||
* You must call this function before using this class, if you
|
||||
* default constructed it. If you zero-initialized it you can
|
||||
* assume the PicoSpinLock is in a valid unlocked state with
|
||||
* getData() == 0.
|
||||
*
|
||||
* (This doesn't use a constructor because we want to be a POD.)
|
||||
*/
|
||||
void init(IntType initialValue = 0) {
|
||||
CHECK(!(initialValue & kLockBitMask_));
|
||||
lock_ = initialValue;
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns the value of the integer we using for our lock, except
|
||||
* with the bit we are using as a lock cleared, regardless of
|
||||
* whether the lock is held.
|
||||
*
|
||||
* It is 'safe' to call this without holding the lock. (As in: you
|
||||
* get the same guarantees for simultaneous accesses to an integer
|
||||
* as you normally get.)
|
||||
*/
|
||||
IntType getData() const {
|
||||
return static_cast<IntType>(lock_ & ~kLockBitMask_);
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the value of the other bits in our integer.
|
||||
*
|
||||
* Don't use this when you aren't holding the lock, unless it can be
|
||||
* guaranteed that no other threads may be trying to use this.
|
||||
*/
|
||||
void setData(IntType w) {
|
||||
CHECK(!(w & kLockBitMask_));
|
||||
lock_ = (lock_ & kLockBitMask_) | w;
|
||||
}
|
||||
|
  /*
   * Try to get the lock without blocking: returns whether or not we
   * got it.
   */
  bool try_lock() const {
    bool ret = false;

#ifdef _MSC_VER
    switch (sizeof(IntType)) {
      case 2:
        // There is no _interlockedbittestandset16 for some reason :(
        ret = _InterlockedOr16(
            (volatile short*)&lock_, (short)kLockBitMask_) & kLockBitMask_;
        break;
      case 4:
        ret = _interlockedbittestandset((volatile long*)&lock_, Bit);
        break;
      case 8:
        ret = _interlockedbittestandset64((volatile long long*)&lock_, Bit);
        break;
    }
    // The intrinsics above return the *previous* state of the bit; we
    // acquired the lock only if it was previously clear.
    ret = !ret;
#elif FOLLY_X64
#define FB_DOBTS(size) \
  asm volatile("lock; bts" #size " %1, (%2); setnc %0" \
               : "=r" (ret) \
               : "i" (Bit), \
                 "r" (&lock_) \
               : "memory", "flags")

    switch (sizeof(IntType)) {
      case 2: FB_DOBTS(w); break;
      case 4: FB_DOBTS(l); break;
      case 8: FB_DOBTS(q); break;
    }

#undef FB_DOBTS
#elif FOLLY_A64
    // __atomic_fetch_or returns the old value; we got the lock iff the
    // lock bit was clear in it.  (kLockBitMask_ rather than `1 << Bit`,
    // which overflows int for Bit >= 31.)
    ret = !(__atomic_fetch_or(&lock_, kLockBitMask_, __ATOMIC_SEQ_CST) &
            kLockBitMask_);
#elif FOLLY_PPC64
#define FB_DOBTS(size) \
  asm volatile("\teieio\n" \
               "\tl" #size "arx 14,0,%[lockPtr]\n" \
               "\tli 15,1\n" \
               "\tsldi 15,15,%[bit]\n" \
               "\tand. 16,15,14\n" \
               "\tbne 0f\n" \
               "\tor 14,14,15\n" \
               "\tst" #size "cx. 14,0,%[lockPtr]\n" \
               "\tbne 0f\n" \
               "\tori %[output],%[output],1\n" \
               "\tisync\n" \
               "0:\n" \
               : [output] "+r" (ret) \
               : [lockPtr] "r"(&lock_), \
                 [bit] "i" (Bit) \
               : "cr0", "memory", "r14", "r15", "r16")

    switch (sizeof(IntType)) {
      case 2: FB_DOBTS(h); break;
      case 4: FB_DOBTS(w); break;
      case 8: FB_DOBTS(d); break;
    }

#undef FB_DOBTS
#else
#error "x86 aarch64 ppc64 only"
#endif

    return ret;
  }

  /*
   * Block until we can acquire the lock.  Uses Sleeper to wait.
   */
  void lock() const {
    detail::Sleeper sleeper;
    while (!try_lock()) {
      sleeper.wait();
    }
  }

  /*
   * Release the lock, without changing the value of the rest of the
   * integer.
   */
  void unlock() const {
#ifdef _MSC_VER
    switch (sizeof(IntType)) {
      case 2:
        // There is no _interlockedbittestandreset16 for some reason :(
        _InterlockedAnd16((volatile short*)&lock_, (short)~kLockBitMask_);
        break;
      case 4:
        _interlockedbittestandreset((volatile long*)&lock_, Bit);
        break;
      case 8:
        _interlockedbittestandreset64((volatile long long*)&lock_, Bit);
        break;
    }
#elif FOLLY_X64
#define FB_DOBTR(size) \
  asm volatile("lock; btr" #size " %0, (%1)" \
               : \
               : "i" (Bit), \
                 "r" (&lock_) \
               : "memory", "flags")

    // Reads and writes can not be reordered wrt locked instructions,
    // so we don't need a memory fence here.
    switch (sizeof(IntType)) {
      case 2: FB_DOBTR(w); break;
      case 4: FB_DOBTR(l); break;
      case 8: FB_DOBTR(q); break;
    }

#undef FB_DOBTR
#elif FOLLY_A64
    // Clear only the lock bit.  (kLockBitMask_ rather than `~(1 << Bit)`,
    // which overflows int for Bit >= 31.)
    __atomic_fetch_and(&lock_, ~kLockBitMask_, __ATOMIC_SEQ_CST);
#elif FOLLY_PPC64
#define FB_DOBTR(size) \
  asm volatile("\teieio\n" \
               "0: l" #size "arx 14,0,%[lockPtr]\n" \
               "\tli 15,1\n" \
               "\tsldi 15,15,%[bit]\n" \
               "\txor 14,14,15\n" \
               "\tst" #size "cx. 14,0,%[lockPtr]\n" \
               "\tbne 0b\n" \
               "\tisync\n" \
               : \
               : [lockPtr] "r"(&lock_), \
                 [bit] "i" (Bit) \
               : "cr0", "memory", "r14", "r15")

    switch (sizeof(IntType)) {
      case 2: FB_DOBTR(h); break;
      case 4: FB_DOBTR(w); break;
      case 8: FB_DOBTR(d); break;
    }

#undef FB_DOBTR
#else
# error "x64 aarch64 ppc64 only"
#endif
  }
};

}
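The asm and intrinsic branches above all implement one primitive: atomically set the lock bit and report whether it was previously clear. A portable sketch of the same technique using std::atomic (editor's illustration only; the vendored header avoids std::atomic so the lock stays a POD and compiles down to a single `lock bts`):

#include <atomic>
#include <cstdint>

// Minimal sketch: a spin lock living in the top bit of a 64-bit word,
// leaving the low 63 bits free for data -- the PicoSpinLock idea.
struct BitSpinLock64 {
  static constexpr std::uint64_t kLockBit = std::uint64_t(1) << 63;
  std::atomic<std::uint64_t> word_{0};

  bool try_lock() {
    // fetch_or returns the previous value; we own the lock iff the bit
    // was clear before we set it (what "lock bts; setnc" computes).
    return !(word_.fetch_or(kLockBit, std::memory_order_acquire) & kLockBit);
  }
  void lock() {
    while (!try_lock()) {
      // A real implementation would back off here, as folly's Sleeper does.
    }
  }
  void unlock() {
    // Clear only the lock bit, preserving the data bits.
    word_.fetch_and(~kLockBit, std::memory_order_release);
  }
  std::uint64_t getData() const {
    return word_.load(std::memory_order_relaxed) & ~kLockBit;
  }
};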
228
ios/Pods/Folly/folly/Poly-inl.h
generated
Normal file
@ -0,0 +1,228 @@
/*
 * Copyright 2017-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

namespace folly {
namespace detail {

template <class I>
inline PolyVal<I>::PolyVal(PolyVal&& that) noexcept {
  that.vptr_->ops_(Op::eMove, &that, static_cast<Data*>(this));
  vptr_ = std::exchange(that.vptr_, vtable<I>());
}

template <class I>
inline PolyVal<I>::PolyVal(PolyOrNonesuch const& that) {
  that.vptr_->ops_(
      Op::eCopy, const_cast<Data*>(that._data_()), PolyAccess::data(*this));
  vptr_ = that.vptr_;
}

template <class I>
inline PolyVal<I>::~PolyVal() {
  vptr_->ops_(Op::eNuke, this, nullptr);
}

template <class I>
inline Poly<I>& PolyVal<I>::operator=(PolyVal that) noexcept {
  vptr_->ops_(Op::eNuke, _data_(), nullptr);
  that.vptr_->ops_(Op::eMove, that._data_(), _data_());
  vptr_ = std::exchange(that.vptr_, vtable<I>());
  return static_cast<Poly<I>&>(*this);
}

template <class I>
template <class T, std::enable_if_t<ModelsInterface<T, I>::value, int>>
inline PolyVal<I>::PolyVal(T&& t) {
  using U = std::decay_t<T>;
  static_assert(
      std::is_copy_constructible<U>::value || !Copyable::value,
      "This Poly<> requires copyability, and the source object is not "
      "copyable");
  // The static and dynamic types should match; otherwise, this will slice.
  assert(typeid(t) == typeid(_t<std::decay<T>>) ||
         !"Dynamic and static exception types don't match. Object would "
          "be sliced when storing in Poly.");
  if (inSitu<U>()) {
    ::new (static_cast<void*>(&_data_()->buff_)) U(static_cast<T&&>(t));
  } else {
    _data_()->pobj_ = new U(static_cast<T&&>(t));
  }
  vptr_ = vtableFor<I, U>();
}
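
// Editor's note: the inSitu<U>() branch above is a small-buffer
// optimization -- small, nothrow-movable types are placement-new'd into an
// inline buffer, everything else goes to the heap.  A minimal standalone
// sketch of that check (names here are hypothetical, not folly's actual
// definitions):
//
//   union Data { void* pobj_; std::aligned_storage_t<sizeof(double[2])> buff_; };
//   template <class U>
//   constexpr bool fitsInSitu() {
//     return sizeof(U) <= sizeof(Data) &&
//         std::is_nothrow_move_constructible<U>::value;
//   }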

template <class I>
template <class I2, std::enable_if_t<ValueCompatible<I, I2>::value, int>>
inline PolyVal<I>::PolyVal(Poly<I2> that) {
  static_assert(
      !Copyable::value || std::is_copy_constructible<Poly<I2>>::value,
      "This Poly<> requires copyability, and the source object is not "
      "copyable");
  auto* that_vptr = PolyAccess::vtable(that);
  if (that_vptr->state_ != State::eEmpty) {
    that_vptr->ops_(Op::eMove, PolyAccess::data(that), _data_());
    vptr_ = &select<I>(*std::exchange(that_vptr, vtable<std::decay_t<I2>>()));
  }
}

template <class I>
template <class T, std::enable_if_t<ModelsInterface<T, I>::value, int>>
inline Poly<I>& PolyVal<I>::operator=(T&& t) {
  *this = PolyVal(static_cast<T&&>(t));
  return static_cast<Poly<I>&>(*this);
}

template <class I>
template <class I2, std::enable_if_t<ValueCompatible<I, I2>::value, int>>
inline Poly<I>& PolyVal<I>::operator=(Poly<I2> that) {
  *this = PolyVal(std::move(that));
  return static_cast<Poly<I>&>(*this);
}

template <class I>
inline void PolyVal<I>::swap(Poly<I>& that) noexcept {
  switch (vptr_->state_) {
    case State::eEmpty:
      *this = std::move(that);
      break;
    case State::eOnHeap:
      if (State::eOnHeap == that.vptr_->state_) {
        std::swap(_data_()->pobj_, that._data_()->pobj_);
        std::swap(vptr_, that.vptr_);
        return;
      }
      FOLLY_FALLTHROUGH;
    case State::eInSitu:
      std::swap(
          *this, static_cast<PolyVal<I>&>(that)); // NOTE: qualified, not ADL
  }
}

template <class I>
inline AddCvrefOf<PolyRoot<I>, I>& PolyRef<I>::_polyRoot_() const noexcept {
  return const_cast<AddCvrefOf<PolyRoot<I>, I>&>(
      static_cast<PolyRoot<I> const&>(*this));
}

template <class I>
constexpr RefType PolyRef<I>::refType() noexcept {
  using J = std::remove_reference_t<I>;
  return std::is_rvalue_reference<I>::value
      ? RefType::eRvalue
      : std::is_const<J>::value ? RefType::eConstLvalue : RefType::eLvalue;
}

template <class I>
template <class That, class I2>
inline PolyRef<I>::PolyRef(That&& that, Type<I2>) {
  auto* that_vptr = PolyAccess::vtable(PolyAccess::root(that));
  detail::State const that_state = that_vptr->state_;
  if (that_state == State::eEmpty) {
    throw BadPolyAccess();
  }
  auto* that_data = PolyAccess::data(PolyAccess::root(that));
  _data_()->pobj_ = that_state == State::eInSitu
      ? const_cast<void*>(static_cast<void const*>(&that_data->buff_))
      : that_data->pobj_;
  this->vptr_ = &select<std::decay_t<I>>(
      *static_cast<VTable<std::decay_t<I2>> const*>(that_vptr->ops_(
          Op::eRefr, nullptr, reinterpret_cast<void*>(refType()))));
}

template <class I>
inline PolyRef<I>::PolyRef(PolyRef const& that) noexcept {
  _data_()->pobj_ = that._data_()->pobj_;
  this->vptr_ = that.vptr_;
}

template <class I>
inline Poly<I>& PolyRef<I>::operator=(PolyRef const& that) noexcept {
  _data_()->pobj_ = that._data_()->pobj_;
  this->vptr_ = that.vptr_;
  return static_cast<Poly<I>&>(*this);
}

template <class I>
template <class T, std::enable_if_t<ModelsInterface<T, I>::value, int>>
inline PolyRef<I>::PolyRef(T&& t) noexcept {
  _data_()->pobj_ =
      const_cast<void*>(static_cast<void const*>(std::addressof(t)));
  this->vptr_ = vtableFor<std::decay_t<I>, AddCvrefOf<std::decay_t<T>, I>>();
}

template <class I>
template <
    class I2,
    std::enable_if_t<ReferenceCompatible<I, I2, I2&&>::value, int>>
inline PolyRef<I>::PolyRef(Poly<I2>&& that) noexcept(
    std::is_reference<I2>::value)
    : PolyRef{that, Type<I2>{}} {
  static_assert(
      Disjunction<std::is_reference<I2>, std::is_rvalue_reference<I>>::value,
      "Attempting to construct a Poly that is a reference to a temporary. "
      "This is probably a mistake.");
}

template <class I>
template <class T, std::enable_if_t<ModelsInterface<T, I>::value, int>>
inline Poly<I>& PolyRef<I>::operator=(T&& t) noexcept {
  *this = PolyRef(static_cast<T&&>(t));
  return static_cast<Poly<I>&>(*this);
}

template <class I>
template <
    class I2,
    std::enable_if_t<ReferenceCompatible<I, I2, I2&&>::value, int>>
inline Poly<I>& PolyRef<I>::operator=(Poly<I2>&& that) noexcept(
    std::is_reference<I2>::value) {
  *this = PolyRef(std::move(that));
  return static_cast<Poly<I>&>(*this);
}

template <class I>
template <
    class I2,
    std::enable_if_t<ReferenceCompatible<I, I2, I2&>::value, int>>
inline Poly<I>& PolyRef<I>::operator=(Poly<I2>& that) noexcept(
    std::is_reference<I2>::value) {
  *this = PolyRef(that);
  return static_cast<Poly<I>&>(*this);
}

template <class I>
template <
    class I2,
    std::enable_if_t<ReferenceCompatible<I, I2, I2 const&>::value, int>>
inline Poly<I>& PolyRef<I>::operator=(Poly<I2> const& that) noexcept(
    std::is_reference<I2>::value) {
  *this = PolyRef(that);
  return static_cast<Poly<I>&>(*this);
}

template <class I>
inline void PolyRef<I>::swap(Poly<I>& that) noexcept {
  std::swap(_data_()->pobj_, that._data_()->pobj_);
  std::swap(this->vptr_, that.vptr_);
}

template <class I>
inline AddCvrefOf<PolyImpl<I>, I>& PolyRef<I>::get() const noexcept {
  return const_cast<AddCvrefOf<PolyImpl<I>, I>&>(
      static_cast<PolyImpl<I> const&>(*this));
}

} // namespace detail
} // namespace folly
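Throughout Poly-inl.h, every operation on the erased object is funneled through a single `ops_` function pointer stored in the vtable, switched on an `Op` enum (eNuke, eMove, eCopy, eRefr). A miniature sketch of that dispatch pattern, with hypothetical names (folly's real definitions live in the suppressed Poly.h below):

#include <new>
#include <utility>

// Hypothetical miniature of the Op dispatch used above: one function per
// concrete type handles destroy/move, so the erased wrapper needs only a
// single pointer instead of one slot per operation.
enum class Op { eNuke, eMove };
union Data { void* pobj_; };

template <class T>
void* exec(Op op, Data* from, Data* to) {
  switch (op) {
    case Op::eNuke:
      delete static_cast<T*>(from->pobj_);  // destroy the erased object
      break;
    case Op::eMove:
      to->pobj_ = std::exchange(from->pobj_, nullptr);  // steal the pointer
      break;
  }
  return nullptr;
}

struct TinyPoly {
  Data data_{nullptr};
  void* (*ops_)(Op, Data*, Data*) = nullptr;

  template <class T>
  explicit TinyPoly(T t) : ops_(&exec<T>) {
    data_.pobj_ = new T(std::move(t));
  }
  TinyPoly(TinyPoly&& that) noexcept : ops_(that.ops_) {
    if (ops_) { ops_(Op::eMove, &that.data_, &data_); }
  }
  ~TinyPoly() {
    if (ops_ && data_.pobj_) { ops_(Op::eNuke, &data_, nullptr); }
  }
};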
1171
ios/Pods/Folly/folly/Poly.h
generated
Normal file
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff